| file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k) |
---|---|---|---|
ib.py | """Example Extorter, useful as a starting point"""
import typing
import logging
import dataclasses
import datetime
# 3rdparty
import slugify
# We use ibflex
from ibflex import parser, FlexStatement, CashAction
from coolbeans.extort.base import ExtortionProtocol
from coolbeans.tools.seeds import Trade, Transfer, Expense, Income, EventDetail
logger = logging.getLogger(__name__)
def trade_key(trade):
if trade.openCloseIndicator:
o = trade.openCloseIndicator.name + ':'
else:
o = ''
return f"{o}{trade.tradeDate.strftime('%Y-%m-%d')}:{trade.ibOrderID}"
def clean_symbol(symbol: str) -> str:
symbol = slugify.slugify(symbol, separator='_')
if symbol[0].isdigit():
symbol = "X" + symbol
symbol = symbol.upper()
return symbol
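# A couple of (hypothetical) examples of the normalization above:
#   clean_symbol("ES M1")  -> "ES_M1"    (slugified with "_", upper-cased)
#   clean_symbol("6B.DEC") -> "X6B_DEC"  (an "X" is prefixed so the name
#                                         does not start with a digit)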
class Extorter(ExtortionProtocol):
FILE_OPEN_MODE = None  # This requires a file-name, not an open stream
ib_account_id = ""
def extort(self, stream: typing.Union[typing.IO[typing.AnyStr], str]):
"""Extract as much information as possible from the workbook"""
for statement in parser.parse(stream).FlexStatements:
for record in self.extract_cash(statement):
yield dataclasses.asdict(record)
for trade in self.extract_trades(statement):
yield dataclasses.asdict(trade)
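# A minimal driver sketch (assuming a local Flex XML report at a hypothetical
# path; each yielded row is a plain dict built from the seed dataclasses):
#   for row in Extorter().extort("statements/2021.xml"):
#       print(row["id"], row.get("amount"))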
@staticmethod
def | (statement: FlexStatement):
"""
Args:
statement: The Statement to extract entries from
Returns:
iterator of DataClass instances for these records
"""
for record in statement.CashTransactions:
date = record.dateTime
if record.type in (
CashAction.DEPOSITWITHDRAW,
):
yield Transfer(
id=record.transactionID,
date=date,
amount=record.amount,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
event_detail=EventDetail.TRANSFER_DEPOSIT.name if record.amount > 0 else EventDetail.TRANSFER_WITHDRAWAL.name,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
elif record.amount < 0:
event_detail = EventDetail.EXPENSE_FEES
if record.type in (CashAction.BONDINTPAID, CashAction.BROKERINTPAID):
event_detail = EventDetail.EXPENSE_INTEREST
if record.type == CashAction.WHTAX:
event_detail = EventDetail.EXPENSE_TAX
yield Expense(
id=record.transactionID,
date=date,
amount=record.amount,
event_detail=event_detail,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
else:
yield Income(
id=record.transactionID,
date=date,
amount=record.amount,
currency=record.currency,
subaccount=record.accountId,
narration=record.description,
meta={
'type': record.type.value,
'rate': record.fxRateToBase
}
)
@staticmethod
def extract_trades(statement: FlexStatement):
"""Pull Trades from a FlexStatement
"""
by_order: typing.Dict[str, Trade] = {}
for trade in statement.Trades:
key = trade_key(trade)
assert key.strip(), f"Invalid Key {len(key)}"
if not trade.openCloseIndicator:
# This isn't a trade at all.
continue
if key in by_order:
combined = by_order[key]
combined.add_trade(
quantity=trade.quantity * trade.multiplier,
price=trade.tradePrice,
fees=trade.ibCommission
)
else:
seed = Trade(
id=key,
date=trade.tradeDate,
price=trade.tradePrice,
currency=trade.currency,
quantity=trade.quantity * trade.multiplier,
commodity=clean_symbol(trade.symbol),
fees=trade.ibCommission,
fees_currency=trade.ibCommissionCurrency,
subaccount=trade.accountId,
event_detail=EventDetail.TRADE_OPEN if trade.openCloseIndicator.name == 'OPEN' else EventDetail.TRADE_CLOSE,
meta={
'exchange': trade.exchange,
'symbol': trade.symbol,
}
)
by_order[key] = seed
for trade in by_order.values():
yield trade
# if trade.securityID is None and "." in trade.symbol:
# # FOREX Trade, not really a valid Symbol at all
# # TODO: Better check than blank securityID
# # Usually [currency].[commodity]. For example GBP.JPY
# # In that case trade.currency is JPY, so we just need to parse out the GBP part
# safe_symbol, _ = trade.symbol.split('.')
# else:
# safe_symbol = self.clean_symbol(trade.symbol)
| extract_cash |
test_cov.py | # Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import itertools as itt
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events)
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.tests.common import assert_snr
from mne.utils import (_TempDir, requires_version, run_tests_if_main,
catch_logging)
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
"""Test properties of compute_whitener."""
raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
raw.pick_types(eeg=True, exclude=())
if proj:
raw.apply_proj()
else:
raw.del_proj()
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw)
W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
verbose='error')
n_channels = len(raw.ch_names)
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
# round-trip mults
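# (W is the whitener and C the colorer; both come from the covariance
# eigendecomposition, roughly W = diag(1 / sqrt(eig)) @ eigvecs.T and
# C = eigvecs @ diag(sqrt(eig)), so W @ C should recover the identity
# on the retained subspace)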
round_trip = np.dot(W, C)
if pca is True:
assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
elif pca == 'white':
# Our first few rows/cols are zeroed out in the white space
assert_allclose(round_trip[-rank:, -rank:],
np.eye(rank), atol=1e-7)
else:
assert pca is False
assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
def test_cov_mismatch():
"""Test estimation with MEG<->Head mismatch."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = find_events(raw, stim_channel='STI 014')
raw.pick_channels(raw.ch_names[:5])
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
for kind in ('shift', 'None'):
epochs_2 = epochs.copy()
# This should be fine
compute_covariance([epochs, epochs_2])
if kind == 'shift':
epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
else: # None
epochs_2.info['dev_head_t'] = None
pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
compute_covariance([epochs, epochs_2], on_mismatch='ignore')
with pytest.warns(RuntimeWarning, match='transform mismatch'):
compute_covariance([epochs, epochs_2], on_mismatch='warn')
pytest.raises(ValueError, compute_covariance, epochs,
on_mismatch='x')
# This should work
epochs.info['dev_head_t'] = None
epochs_2.info['dev_head_t'] = None
compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
"""Test covariance ordering."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
info = raw.info
# add MEG channel with low enough index number to affect EEG if
# order is incorrect
info['bads'] += ['MEG 0113']
ch_names = [info['ch_names'][pick]
for pick in pick_types(info, meg=False, eeg=True)]
cov = read_cov(cov_fname)
# no avg ref present warning
prepare_noise_cov(cov, info, ch_names, verbose='error')
# big reordering
cov_reorder = cov.copy()
order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
cov_reorder['names'] = [cov['names'][ii] for ii in order]
cov_reorder['data'] = cov['data'][order][:, order]
# Make sure we did this properly
_assert_reorder(cov_reorder, cov, order)
# Now check some functions that should get the same result for both
# regularize
with pytest.raises(ValueError, match='rank, if str'):
regularize(cov, info, rank='foo') | with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=False)
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=1.)
cov_reg = regularize(cov, info, rank='full')
cov_reg_reorder = regularize(cov_reorder, info, rank='full')
_assert_reorder(cov_reg_reorder, cov_reg, order)
# prepare_noise_cov
cov_prep = prepare_noise_cov(cov, info, ch_names)
cov_prep_reorder = prepare_noise_cov(cov_reorder, info, ch_names)
_assert_reorder(cov_prep, cov_prep_reorder,
order=np.arange(len(cov_prep['names'])))
# compute_whitener
whitener, w_ch_names, n_nzero = compute_whitener(
cov, info, return_rank=True)
assert whitener.shape[0] == whitener.shape[1]
whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
cov_reorder, info, return_rank=True)
assert_array_equal(w_ch_names_2, w_ch_names)
assert_allclose(whitener_2, whitener)
assert n_nzero == n_nzero_2
# with pca
assert n_nzero < whitener.shape[0]
whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
cov, info, pca=True, return_rank=True)
assert_array_equal(w_ch_names_pca, w_ch_names)
assert n_nzero_pca == n_nzero
assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
# whiten_evoked
evoked = read_evokeds(ave_fname)[0]
evoked_white = whiten_evoked(evoked, cov)
evoked_white_2 = whiten_evoked(evoked, cov_reorder)
assert_allclose(evoked_white_2.data, evoked_white.data)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov():
"""Test ad hoc cov creation and I/O."""
tempdir = _TempDir()
out_fname = op.join(tempdir, 'test-cov.fif')
evoked = read_evokeds(ave_fname)[0]
cov = make_ad_hoc_cov(evoked.info)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
cov = make_ad_hoc_cov(evoked.info, std)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
def test_io_cov():
"""Test IO for noise covariance matrices."""
tempdir = _TempDir()
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['loglik'] = -np.inf
cov.save(op.join(tempdir, 'test-cov.fif'))
cov2 = read_cov(op.join(tempdir, 'test-cov.fif'))
assert_array_almost_equal(cov.data, cov2.data)
assert_equal(cov['method'], cov2['method'])
assert_equal(cov['loglik'], cov2['loglik'])
assert 'Covariance' in repr(cov)
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
cov['bads'] = ['EEG 039']
cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
cov_sel.save(op.join(tempdir, 'test-cov.fif'))
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(op.join(tempdir, 'test-cov.fif.gz'))
cov2 = read_cov(op.join(tempdir, 'test-cov.fif.gz'))
assert_array_almost_equal(cov.data, cov2.data)
# test warnings on bad filenames
cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-cov.fif'):
write_cov(cov_badname, cov)
with pytest.warns(RuntimeWarning, match='-cov.fif'):
read_cov(cov_badname)
@pytest.mark.parametrize('method', (None, ['empirical']))
def test_cov_estimation_on_raw(method):
"""Test estimation from raw (typically empty room)."""
tempdir = _TempDir()
raw = read_raw_fif(raw_fname, preload=True)
cov_mne = read_cov(erm_cov_fname)
# The pure-string method uses the more efficient numpy-based code path, while
# the list gets triaged to compute_covariance (should be equivalent,
# but uses more memory)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(raw, tstep=None, method=method,
rank='full')
assert_equal(cov.ch_names, cov_mne.ch_names)
assert_equal(cov.nfree, cov_mne.nfree)
assert_snr(cov.data, cov_mne.data, 1e4)
# tstep=0.2 (default)
with pytest.warns(None): # can warn about EEG ref
cov = compute_raw_covariance(raw, method=method, rank='full')
assert_equal(cov.nfree, cov_mne.nfree - 119) # cutoff some samples
assert_snr(cov.data, cov_mne.data, 1e2)
# test IO when computation done in Python
cov.save(op.join(tempdir, 'test-cov.fif')) # test saving
cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
assert cov_read.ch_names == cov.ch_names
assert cov_read.nfree == cov.nfree
assert_array_almost_equal(cov.data, cov_read.data)
# test with a subset of channels
raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
raw_pick.info.normalize_proj()
cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
rank='full')
assert cov_mne.ch_names[:5] == cov.ch_names
assert_snr(cov.data, cov_mne.data[:5, :5], 1e4)
cov = compute_raw_covariance(raw_pick, method=method, rank='full')
assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps
# make sure we get a warning with too short a segment
raw_2 = read_raw_fif(raw_fname).crop(0, 1)
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw_2, method=method)
# no epochs found due to rejection
pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
method='empirical', reject=dict(eog=200e-6))
# but this should work
cov = compute_raw_covariance(raw.copy().crop(0, 10.),
tstep=None, method=method,
reject=dict(eog=1000e-6),
verbose='error')
@pytest.mark.slowtest
@requires_version('sklearn', '0.15')
def test_cov_estimation_on_raw_reg():
"""Test estimation from raw with regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.info['sfreq'] /= 10.
raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed
cov_mne = read_cov(erm_cov_fname)
with pytest.warns(RuntimeWarning, match='Too few samples'):
# XXX don't use "shrunk" here, for some reason it makes Travis 2.7
# hang... "diagonal_fixed" is much faster. Use long epochs for speed.
cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert err < tol, '%s >= %s' % (err, tol)
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None))
def test_cov_estimation_with_triggers(rank):
"""Test estimation from raw with triggers."""
tempdir = _TempDir()
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True).load_data()
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True,
reject=reject, preload=True)
cov = compute_covariance(epochs, keep_sample_mean=True)
_assert_cov(cov, read_cov(cov_km_fname))
# Test with tmin and tmax (different but not too much)
cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
assert np.all(cov.data != cov_tmin_tmax.data)
err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
linalg.norm(cov_tmin_tmax.data, ord='fro'))
assert err < 0.05
# cov using a list of epochs and keep_sample_mean=True
epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject)
for ev_id in event_ids]
cov2 = compute_covariance(epochs, keep_sample_mean=True)
assert_array_almost_equal(cov.data, cov2.data)
assert cov.ch_names == cov2.ch_names
# cov with keep_sample_mean=False using a list of epochs
cov = compute_covariance(epochs, keep_sample_mean=False)
_assert_cov(cov, read_cov(cov_fname), nfree=False)
method_params = {'empirical': {'assume_centered': False}}
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method_params=method_params)
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method='shrunk', rank=rank)
# test IO when computation done in Python
cov.save(op.join(tempdir, 'test-cov.fif')) # test saving
cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
_assert_cov(cov, cov_read, 1e-5)
# cov with list of epochs with different projectors
epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True),
Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=False)]
# these should fail
pytest.raises(ValueError, compute_covariance, epochs)
pytest.raises(ValueError, compute_covariance, epochs, projs=None)
# these should work, but won't be equal to above
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=[])
# test new dict support
epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
proj=True, reject=reject, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs, projs=[])
pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
"""Test arithmetic with noise covariance matrices."""
cov = read_cov(cov_fname)
cov_sum = cov + cov
assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
assert_array_almost_equal(2 * cov.data, cov_sum.data)
assert cov.ch_names == cov_sum.ch_names
cov += cov
assert_array_almost_equal(cov_sum.nfree, cov.nfree)
assert_array_almost_equal(cov_sum.data, cov.data)
assert cov_sum.ch_names == cov.ch_names
def test_regularize_cov():
"""Test cov regularization."""
raw = read_raw_fif(raw_fname)
raw.info['bads'].append(raw.ch_names[0]) # test with bad channels
noise_cov = read_cov(cov_fname)
# Regularize noise cov
reg_noise_cov = regularize(noise_cov, raw.info,
mag=0.1, grad=0.1, eeg=0.1, proj=True,
exclude='bads', rank='full')
assert noise_cov['dim'] == reg_noise_cov['dim']
assert noise_cov['data'].shape == reg_noise_cov['data'].shape
assert np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08
# make sure all args are represented
assert set(_DATA_CH_TYPES_SPLIT) - set(_get_args(regularize)) == set()
def test_whiten_evoked():
"""Test whitening of evoked data."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
cov = read_cov(cov_fname)
###########################################################################
# Show result
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
exclude='bads', rank='full')
evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
assert np.all(mean_baseline < 1.)
assert np.all(mean_baseline > 0.2)
# degenerate
cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])
pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)
def test_regularized_covariance():
"""Test unchanged data with regularized_covariance."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
data = evoked.data.copy()
# check that input data remain unchanged. gh-5698
_regularized_covariance(data)
assert_allclose(data, evoked.data, atol=1e-20)
@requires_version('sklearn', '0.15')
def test_auto_low_rank():
"""Test probabilistic low rank estimators."""
n_samples, n_features, rank = 400, 10, 5
sigma = 0.1
def get_data(n_samples, n_features, rank, sigma):
rng = np.random.RandomState(42)
W = rng.randn(n_features, n_features)
X = rng.randn(n_samples, rank)
U, _, _ = linalg.svd(W.copy())
X = np.dot(X, U[:, :rank].T)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X += rng.randn(n_samples, n_features) * sigmas
return X
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [4, 5, 6]}
cv = 3
n_jobs = 1
mode = 'factor_analysis'
rescale = 1e8
X *= rescale
est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params,
cv=cv)
assert_equal(info['best'], rank)
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [n_features + 5]}
msg = ('You are trying to estimate %i components on matrix '
'with %i features.') % (n_features + 5, n_features)
with pytest.warns(RuntimeWarning, match=msg):
_auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params, cv=cv)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None, 'info'))
@requires_version('sklearn', '0.15')
def test_compute_covariance_auto_reg(rank):
"""Test automated regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.resample(100, npad='auto') # much faster estimation
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(mag=4e-12)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
# we need a few channels for numerical reasons in PCA/FA
picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
raw.pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
epochs = Epochs(
raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
epochs = epochs.crop(None, 0)[:5]
method_params = dict(factor_analysis=dict(iter_n_components=[3]),
pca=dict(iter_n_components=[3]))
covs = compute_covariance(epochs, method='auto',
method_params=method_params,
return_estimators=True, rank=rank)
# make sure regularization produces structured differences
diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
off_diag_mask = np.invert(diag_mask)
for cov_a, cov_b in itt.combinations(covs, 2):
if (cov_a['method'] == 'diagonal_fixed' and
# here we have diagonal or no regularization.
cov_b['method'] == 'empirical' and rank == 'full'):
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
# but the rest is the same
assert_array_equal(cov_a['data'][off_diag_mask],
cov_b['data'][off_diag_mask])
else:
# and here we have shrinkage everywhere.
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
logliks = [c['loglik'] for c in covs]
assert np.diff(logliks).max() <= 0 # descending order
methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
if rank == 'full':
methods.extend(['factor_analysis', 'pca'])
with catch_logging() as log:
cov3 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=True, rank=rank,
verbose=True)
log = log.getvalue().split('\n')
if rank is None:
assert 'Not doing PCA for MAG.' in log
assert 'Reducing data rank from 10 -> 7' in log
else:
assert 'Reducing' not in log
method_names = [cov['method'] for cov in cov3]
best_bounds = [-45, -35]
bounds = [-55, -45] if rank == 'full' else best_bounds
for method in set(methods) - {'empirical', 'shrunk'}:
this_lik = cov3[method_names.index(method)]['loglik']
assert bounds[0] < this_lik < bounds[1]
this_lik = cov3[method_names.index('shrunk')]['loglik']
assert best_bounds[0] < this_lik < best_bounds[1]
this_lik = cov3[method_names.index('empirical')]['loglik']
bounds = [-110, -100] if rank == 'full' else best_bounds
assert bounds[0] < this_lik < bounds[1]
assert_equal({c['method'] for c in cov3}, set(methods))
cov4 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=False, rank=rank)
assert cov3[0]['method'] == cov4['method'] # ordering
# invalid prespecified method
pytest.raises(ValueError, compute_covariance, epochs, method='pizza')
# invalid scalings
pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
scalings=dict(misc=123))
def _cov_rank(cov, info, proj=True):
# ignore warnings about rank mismatches: sometimes we will intentionally
# violate the computed/info assumption, such as when using SSS with
# `rank='full'`
with pytest.warns(None):
return _compute_rank_int(cov, info=info, proj=proj)
@pytest.fixture(scope='module')
def raw_epochs_events():
"""Create raw, epochs, and events for tests."""
raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3)
raw = maxwell_filter(raw, regularize=None) # heavily reduce the rank
assert raw.info['bads'] == [] # no bads
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
return (raw, epochs, events)
@requires_version('sklearn', '0.15')
@pytest.mark.parametrize('rank', (None, 'full', 'info'))
def test_low_rank_methods(rank, raw_epochs_events):
"""Test low-rank covariance matrix estimation."""
epochs = raw_epochs_events[1]
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
methods = ('empirical', 'diagonal_fixed', 'oas')
bounds = {
'None': dict(empirical=(-6000, -5000),
diagonal_fixed=(-1500, -500),
oas=(-700, -600)),
'full': dict(empirical=(-9000, -8000),
diagonal_fixed=(-2000, -1600),
oas=(-1600, -1000)),
'info': dict(empirical=(-6000, -5000),
diagonal_fixed=(-700, -600),
oas=(-700, -600)),
}
with pytest.warns(RuntimeWarning, match='Too few samples'):
covs = compute_covariance(
epochs, method=methods, return_estimators=True, rank=rank,
verbose=True)
for cov in covs:
method = cov['method']
these_bounds = bounds[str(rank)][method]
this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))
if rank == 'full' and method != 'empirical':
assert this_rank == n_ch
else:
assert this_rank == sss_proj_rank
assert these_bounds[0] < cov['loglik'] < these_bounds[1], \
(rank, method)
@requires_version('sklearn', '0.15')
def test_low_rank_cov(raw_epochs_events):
"""Test additional properties of low rank computations."""
raw, epochs, events = raw_epochs_events
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
proj_rank = 365 # one EEG proj
with pytest.warns(RuntimeWarning, match='Too few samples'):
emp_cov = compute_covariance(epochs)
# Test equivalence with mne.cov.regularize subspace
with pytest.raises(ValueError, match='are dependent.*must equal'):
regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == proj_rank
with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
_compute_rank_int(reg_cov, info=epochs.info)
del reg_cov
with catch_logging() as log:
reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
verbose=True)
log = log.getvalue()
assert 'jointly' in log
assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
del reg_r_only_cov, reg_r_cov
# test that rank=306 is same as rank='full'
epochs_meg = epochs.copy().pick_types()
assert len(epochs_meg.ch_names) == 306
epochs_meg.info.update(bads=[], projs=[])
cov_full = compute_covariance(epochs_meg, method='oas',
rank='full', verbose='error')
assert _cov_rank(cov_full, epochs_meg.info) == 306
with pytest.deprecated_call(match='int is deprecated'):
cov_dict = compute_covariance(epochs_meg, method='oas', rank=306)
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306), verbose='error')
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
# Work with just EEG data to simplify projection / rank reduction
raw = raw.copy().pick_types(meg=False, eeg=True)
n_proj = 2
raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
n_ch = len(raw.ch_names)
rank = n_ch - n_proj - 1 # plus avg proj
assert len(raw.info['projs']) == 3
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
assert len(raw.ch_names) == n_ch
emp_cov = compute_covariance(epochs, rank='full', verbose='error')
assert _cov_rank(emp_cov, epochs.info) == rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == rank
reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_cov, epochs.info) == rank
dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
verbose='error')
assert _cov_rank(dia_cov, epochs.info) == rank
assert_allclose(dia_cov['data'], reg_cov['data'])
# test our deprecation: can simply remove later
epochs.pick_channels(epochs.ch_names[:103])
# degenerate
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='pca')
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='factor_analysis')
@testing.requires_testing_data
@requires_version('sklearn', '0.15')
def test_cov_ctf():
"""Test basic cov computation on ctf data with/without compensation."""
raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
events = make_fixed_length_events(raw, 99999)
assert len(events) == 2
ch_names = [raw.info['ch_names'][pick]
for pick in pick_types(raw.info, meg=True, eeg=False,
ref_meg=False)]
for comp in [0, 1]:
raw.apply_gradient_compensation(comp)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0.,
method=['empirical'])
prepare_noise_cov(noise_cov, raw.info, ch_names)
raw.apply_gradient_compensation(0)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
raw.apply_gradient_compensation(1)
# TODO This next call in principle should fail.
prepare_noise_cov(noise_cov, raw.info, ch_names)
# make sure comps matrices were not removed from raw
assert raw.info['comps'], 'Comps matrices removed'
run_tests_if_main() | |
image_holder.rs | use std::cell::RefCell;
use std::rc::Rc;
/// Helper struct that allows passing the pixels to the Cairo image surface; once the
/// image surface is destroyed, the pixels are stored back in the return_location.
///
/// This allows us to give temporary ownership of the pixels to the Cairo surface and later
/// retrieve them in a safe way, while ensuring that nothing else still has access to
/// them.
pub struct ImageHolder {
image: Option<Box<[u8]>>,
return_location: Rc<RefCell<Option<Box<[u8]>>>>,
}
impl ImageHolder {
pub fn | (image: Option<Box<[u8]>>, return_location: Rc<RefCell<Option<Box<[u8]>>>>) -> Self {
Self {
image,
return_location,
}
}
}
/// This stores the pixels back into the return_location as now nothing
/// references the pixels anymore
impl Drop for ImageHolder {
fn drop(&mut self) {
*self.return_location.borrow_mut() = Some(self.image.take().expect("Holding no image"));
}
}
/// Needed for ImageSurface::create_for_data() to be able to access the pixels
impl AsRef<[u8]> for ImageHolder {
fn as_ref(&self) -> &[u8] {
self.image.as_ref().expect("Holding no image").as_ref()
}
}
impl AsMut<[u8]> for ImageHolder {
fn as_mut(&mut self) -> &mut [u8] {
self.image.as_mut().expect("Holding no image").as_mut()
}
}
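// A minimal usage sketch (assuming the cairo-rs `ImageSurface::create_for_data`
// API, which takes ownership of an `AsRef<[u8]> + AsMut<[u8]> + 'static` value;
// `pixels`, `width`, `height` and `stride` are placeholders):
//
//     let return_location = Rc::new(RefCell::new(None));
//     let holder = ImageHolder::new(Some(pixels), return_location.clone());
//     let surface = cairo::ImageSurface::create_for_data(
//         holder, cairo::Format::Rgb24, width, height, stride)?;
//     // ... draw using `surface` ...
//     drop(surface); // ImageHolder::drop runs, handing the pixels back
//     let pixels = return_location.borrow_mut().take().expect("pixels returned");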
| new |
mmuser.go | package irckit
import (
"fmt"
"net"
"regexp"
"strings"
"time"
"github.com/42wim/matterbridge/matterclient"
"github.com/42wim/matterircd/config"
"github.com/mattermost/mattermost-server/model"
"github.com/muesli/reflow/wordwrap"
"github.com/sorcix/irc"
)
type MmInfo struct {
MmGhostUser bool
Srv Server
Credentials *MmCredentials
Cfg *MmCfg
mc *matterclient.MMClient
idleStop chan struct{}
}
type MmCredentials struct {
Login string
Team string
Pass string
Server string
}
type MmCfg struct {
AllowedServers []string
SlackSettings config.Settings
MattermostSettings config.Settings
DefaultServer string
DefaultTeam string
Insecure bool
SkipTLSVerify bool
JoinExclude []string
JoinInclude []string
PartFake bool
PrefixMainTeam bool
PasteBufferTimeout int
DisableAutoView bool
}
func NewUserMM(c net.Conn, srv Server, cfg *MmCfg) *User |
func (u *User) loginToMattermost() (*matterclient.MMClient, error) {
mc := matterclient.New(u.Credentials.Login, u.Credentials.Pass, u.Credentials.Team, u.Credentials.Server)
if u.Cfg.Insecure {
mc.Credentials.NoTLS = true
}
mc.Credentials.SkipTLSVerify = u.Cfg.SkipTLSVerify
mc.SetLogLevel(LogLevel)
logger.Infof("login as %s (team: %s) on %s", u.Credentials.Login, u.Credentials.Team, u.Credentials.Server)
err := mc.Login()
if err != nil {
logger.Error("login failed", err)
return nil, err
}
logger.Info("login succeeded")
u.mc = mc
u.mc.WsQuit = false
go mc.WsReceiver()
go u.handleWsMessage()
// do anti-idle on town-square; every installation should have this channel
channels := u.mc.GetChannels()
for _, channel := range channels {
if channel.Name == "town-square" && !u.Cfg.DisableAutoView {
go u.antiIdle(channel.Id)
continue
}
}
return mc, nil
}
func (u *User) logoutFromMattermost() error {
logger.Infof("logout as %s (team: %s) on %s", u.Credentials.Login, u.Credentials.Team, u.Credentials.Server)
err := u.mc.Logout()
if err != nil {
logger.Error("logout failed")
}
logger.Info("logout succeeded")
u.Srv.Logout(u)
u.idleStop <- struct{}{}
return nil
}
func (u *User) createMMUser(mmuser *model.User) *User {
if mmuser == nil {
return nil
}
if ghost, ok := u.Srv.HasUser(mmuser.Username); ok {
return ghost
}
ghost := &User{Nick: mmuser.Username, User: mmuser.Id, Real: mmuser.FirstName + " " + mmuser.LastName, Host: u.mc.Client.Url, Roles: mmuser.Roles, channels: map[Channel]struct{}{}}
ghost.MmGhostUser = true
u.Srv.Add(ghost)
return ghost
}
func (u *User) createService(nick string, what string) {
service := &User{Nick: nick, User: nick, Real: what, Host: "service", channels: map[Channel]struct{}{}}
service.MmGhostUser = true
u.Srv.Add(service)
}
func (u *User) addUserToChannel(user *model.User, channel string, channelId string) {
if user == nil {
return
}
ghost := u.createMMUser(user)
if ghost == nil {
logger.Warnf("Cannot join %v into %s", user, channel)
return
}
logger.Debugf("adding %s to %s", ghost.Nick, channel)
ch := u.Srv.Channel(channelId)
ch.Join(ghost)
}
func (u *User) addUsersToChannels() {
srv := u.Srv
throttle := time.Tick(time.Millisecond * 50)
logger.Debug("in addUsersToChannels()")
// add all users, also who are not on channels
ch := srv.Channel("&users")
for _, mmuser := range u.mc.GetUsers() {
// do not add our own nick
if mmuser.Id == u.mc.User.Id {
continue
}
u.createMMUser(mmuser)
u.addUserToChannel(mmuser, "&users", "&users")
}
ch.Join(u)
// channel that receives messages from channels not joined on irc
ch = srv.Channel("&messages")
ch.Join(u)
channels := make(chan *model.Channel, 5)
for i := 0; i < 10; i++ {
go u.addUserToChannelWorker(channels, throttle)
}
for _, mmchannel := range u.mc.GetChannels() {
logger.Debugf("Adding channel %#v", mmchannel)
channels <- mmchannel
}
close(channels)
}
func (u *User) addUserToChannelWorker(channels <-chan *model.Channel, throttle <-chan time.Time) {
for {
mmchannel, ok := <-channels
if !ok {
logger.Debug("Done adding user to channels")
return
}
logger.Debug("addUserToChannelWorker", mmchannel)
<-throttle
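// note: the 50ms throttle ticker is shared by all ten workers, so these
// receives rate-limit the channel syncs collectively, not per worker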
// exclude direct messages
var spoof func(string, string)
if strings.Contains(mmchannel.Name, "__") {
userId := strings.Split(mmchannel.Name, "__")[0]
u.createMMUser(u.mc.GetUser(userId))
// wrap MsgSpoofUser here
spoof = func(spoofUsername string, msg string) {
u.MsgSpoofUser(u, spoofUsername, msg)
}
} else {
channelName := mmchannel.Name
if mmchannel.TeamId != u.mc.Team.Id || u.Cfg.PrefixMainTeam {
channelName = u.mc.GetTeamName(mmchannel.TeamId) + "/" + mmchannel.Name
}
u.syncMMChannel(mmchannel.Id, channelName)
ch := u.Srv.Channel(mmchannel.Id)
spoof = ch.SpoofMessage
}
since := u.mc.GetLastViewedAt(mmchannel.Id)
// ignore invalid/deleted/old channels
if since == 0 {
continue
}
// post everything to the channel you haven't seen yet
postlist := u.mc.GetPostsSince(mmchannel.Id, since)
if postlist == nil {
// if the channel is not from the primary team id, we can't get posts
if mmchannel.TeamId == u.mc.Team.Id {
logger.Errorf("something wrong with getPostsSince for channel %s (%s)", mmchannel.Id, mmchannel.Name)
}
continue
}
var prevDate string
// traverse the order in reverse
for i := len(postlist.Order) - 1; i >= 0; i-- {
p := postlist.Posts[postlist.Order[i]]
if p.Type == model.POST_JOIN_LEAVE {
continue
}
if p.DeleteAt > p.CreateAt {
continue
}
ts := time.Unix(0, p.CreateAt*int64(time.Millisecond))
for _, post := range strings.Split(p.Message, "\n") {
if user, ok := u.mc.Users[p.UserId]; ok {
date := ts.Format("2006-01-02")
if date != prevDate {
spoof("matterircd", fmt.Sprintf("Replaying since %s", date))
prevDate = date
}
spoof(user.Username, fmt.Sprintf("[%s] %s", ts.Format("15:04"), post))
}
}
}
if !u.Cfg.DisableAutoView {
u.mc.UpdateLastViewed(mmchannel.Id)
}
}
}
func (u *User) handleWsMessage() {
updateChannelsThrottle := time.Tick(time.Second * 60)
for {
if u.mc.WsQuit {
logger.Debug("exiting handleWsMessage")
return
}
logger.Debug("in handleWsMessage", len(u.mc.MessageChan))
message := <-u.mc.MessageChan
logger.Debugf("MMUser WsReceiver: %#v", message.Raw)
// check if we have the users/channels in our cache; if not, update them
u.checkWsActionMessage(message.Raw, updateChannelsThrottle)
switch message.Raw.Event {
case model.WEBSOCKET_EVENT_POSTED:
u.handleWsActionPost(message.Raw)
case model.WEBSOCKET_EVENT_POST_EDITED:
u.handleWsActionPost(message.Raw)
case model.WEBSOCKET_EVENT_USER_REMOVED:
u.handleWsActionUserRemoved(message.Raw)
case model.WEBSOCKET_EVENT_USER_ADDED:
u.handleWsActionUserAdded(message.Raw)
case model.WEBSOCKET_EVENT_CHANNEL_CREATED:
u.handleWsActionChannelCreated(message.Raw)
case model.WEBSOCKET_EVENT_CHANNEL_DELETED:
u.handleWsActionChannelDeleted(message.Raw)
}
}
}
func (u *User) handleWsActionPost(rmsg *model.WebSocketEvent) {
var ch Channel
data := model.PostFromJson(strings.NewReader(rmsg.Data["post"].(string)))
props := rmsg.Data
extraProps := model.StringInterfaceFromJson(strings.NewReader(rmsg.Data["post"].(string)))["props"].(map[string]interface{})
logger.Debugf("handleWsActionPost() receiving userid %s", data.UserId)
if rmsg.Event == model.WEBSOCKET_EVENT_POST_EDITED && data.HasReactions {
logger.Debugf("edit post with reactions, do not relay. We don't know if a reaction is added or the post has been edited")
return
}
if data.UserId == u.mc.User.Id {
if _, ok := extraProps["matterircd_"+u.mc.User.Id].(bool); ok {
logger.Debugf("message is sent from matterirc, not relaying %#v", data.Message)
return
}
if data.Type == model.POST_JOIN_LEAVE || data.Type == model.POST_JOIN_CHANNEL {
logger.Debugf("our own join/leave message. not relaying %#v", data.Message)
return
}
}
if data.ParentId != "" {
parentPost, resp := u.mc.Client.GetPost(data.ParentId, "")
if resp.Error != nil {
logger.Debugf("Unable to get parent post for", data)
} else {
threadPosts, resp := u.mc.Client.GetPostThread(data.Id, "")
if resp.Error != nil {
logger.Debugf("Unable to get thread for", data)
} else {
parentGhost := u.createMMUser(u.mc.GetUser(parentPost.UserId))
if len(threadPosts.Posts) == 2 {
// first response
data.Message = fmt.Sprintf("[%s] %s (re @%s: %s)", data.ParentId[0:6], data.Message, parentGhost.Nick, shortenMessage(parentPost.Message))
} else {
data.Message = fmt.Sprintf("[%s] %s", data.ParentId[0:6], data.Message)
}
}
}
}
// create new "ghost" user
ghost := u.createMMUser(u.mc.GetUser(data.UserId))
// our own message, set our IRC self as user, not our mattermost self
if data.UserId == u.mc.User.Id {
ghost = u
}
spoofUsername := data.UserId
if ghost != nil {
spoofUsername = ghost.Nick
}
// if we got attachments (eg slack attachments) and we have a fallback message, show this.
if entries, ok := extraProps["attachments"].([]interface{}); ok {
for _, entry := range entries {
if f, ok := entry.(map[string]interface{}); ok {
data.Message = data.Message + "\n" + f["fallback"].(string)
}
}
}
// check if we have an override_username (from webhooks) and use it
overrideUsername, _ := extraProps["override_username"].(string)
if overrideUsername != "" {
// only allow valid irc nicks
re := regexp.MustCompile("^[a-zA-Z0-9_]*$")
if re.MatchString(overrideUsername) {
spoofUsername = overrideUsername
}
}
msgs := strings.Split(data.Message, "\n")
if data.Type == model.POST_JOIN_LEAVE || data.Type == "system_leave_channel" || data.Type == "system_join_channel" || data.Type == "system_add_to_channel" || data.Type == "system_remove_from_channel" {
logger.Debugf("join/leave message. not relaying %#v", data.Message)
u.mc.UpdateChannels()
ch = u.Srv.Channel(data.ChannelId)
if data.Type == "system_add_to_channel" {
if added, ok := extraProps["addedUsername"].(string); ok {
user, resp := u.mc.Client.GetUserByUsername(added, "")
if resp.Error != nil {
fmt.Println(resp.Error)
return
}
ghost := u.createMMUser(user)
// we are added ourselves
if user.Id == u.mc.User.Id {
u.syncMMChannel(data.ChannelId, u.mc.GetChannelName(data.ChannelId))
return
}
ch.Join(ghost)
if adder, ok := extraProps["username"].(string); ok {
ch.SpoofMessage("system", "added "+added+" to the channel by "+adder)
}
}
return
}
if data.Type == "system_remove_from_channel" {
if removed, ok := extraProps["removedUsername"].(string); ok {
user, resp := u.mc.Client.GetUserByUsername(removed, "")
if resp.Error != nil {
fmt.Println(resp.Error)
return
}
ghost := u.createMMUser(user)
// we are removed
// TODO this doesn't actually work yet, we don't see the "system_remove_from_channel" message when we are removed ourselves
if user.Id == u.mc.User.Id {
ch.Part(u, "")
return
}
ch.Part(ghost, "")
ch.SpoofMessage("system", "removed "+removed+" from the channel")
}
return
}
if ghost == nil {
return
}
if !ch.HasUser(ghost) {
// TODO use u.handleWsActionUserAdded()
ch.Join(ghost)
} else {
// TODO use u.handleWsActionUserRemoved()
//u.handleWsActionUserRemoved(data)
ch.Part(ghost, "")
}
return
}
if data.Type == "system_header_change" {
ch = u.Srv.Channel(data.ChannelId)
if topic, ok := extraProps["new_header"].(string); ok {
if topicuser, ok := extraProps["username"].(string); ok {
tu, _ := u.Srv.HasUser(topicuser)
ch.Topic(tu, topic)
}
}
return
}
// not a private message so do channel stuff
if props["channel_type"] != "D" && ghost != nil {
ch = u.Srv.Channel(data.ChannelId)
// in a group
if props["channel_type"] == "G" {
myself := u.createMMUser(u.mc.User)
if !ch.HasUser(myself) {
ch.Join(myself)
u.syncMMChannel(data.ChannelId, u.mc.GetChannelName(data.ChannelId))
}
}
// join if not in channel
if !ch.HasUser(ghost) {
logger.Debugf("User %s is not in channel %s. Joining now", ghost.Nick, ch.String())
//ch = u.Srv.Channel("&messages")
ch.Join(ghost)
}
// excluded channel
if stringInSlice(ch.String(), u.Cfg.JoinExclude) {
logger.Debugf("channel %s is in JoinExclude, send to &messages", ch.String())
ch = u.Srv.Channel("&messages")
}
// not in included channel
if len(u.Cfg.JoinInclude) > 0 && !stringInSlice(ch.String(), u.Cfg.JoinInclude) {
logger.Debugf("channel %s is not in JoinInclude, send to &messages", ch.String())
ch = u.Srv.Channel("&messages")
}
}
// add an edited string when messages are edited
if len(msgs) > 0 && rmsg.Event == model.WEBSOCKET_EVENT_POST_EDITED {
msgs[len(msgs)-1] = msgs[len(msgs)-1] + " (edited)"
}
// append channel name where messages are sent from
if props["channel_type"] != "D" && ch.ID() == "&messages" {
spoofUsername += "/" + u.Srv.Channel(data.ChannelId).String()
}
for _, m := range msgs {
if m == "" {
continue
}
if props["channel_type"] == "D" {
if data.UserId == u.mc.User.Id {
// we have to look in the mention to see who we are sending a message to
mentions := model.ArrayFromJson(strings.NewReader(props["mentions"].(string)))
if len(mentions) > 0 {
spoofUsername = u.mc.GetUserName(mentions[0])
u.MsgSpoofUser(u, spoofUsername, m)
}
} else {
u.MsgSpoofUser(ghost, spoofUsername, m)
}
continue
}
if strings.Contains(data.Message, "@channel") || strings.Contains(data.Message, "@here") {
ch.SpoofNotice(spoofUsername, m)
continue
}
ch.SpoofMessage(spoofUsername, m)
}
if len(data.FileIds) > 0 {
logger.Debugf("files detected")
for _, fname := range u.mc.GetFileLinks(data.FileIds) {
if props["channel_type"] == "D" {
if data.UserId == u.mc.User.Id {
// we have to look in the mention to see who we are sending a message to
mentions := model.ArrayFromJson(strings.NewReader(props["mentions"].(string)))
if len(mentions) > 0 {
spoofUsername = u.mc.GetUserName(mentions[0])
u.MsgSpoofUser(u, spoofUsername, "download file -"+fname)
}
} else {
u.MsgSpoofUser(ghost, spoofUsername, "download file -"+fname)
}
} else {
ch.SpoofMessage(spoofUsername, "download file - "+fname)
}
}
}
logger.Debugf("handleWsActionPost() user %s sent %s", u.mc.GetUser(data.UserId).Username, data.Message)
logger.Debugf("%#v", data)
// updatelastviewed
if !u.Cfg.DisableAutoView {
u.mc.UpdateLastViewed(data.ChannelId)
}
}
func (u *User) handleWsActionUserRemoved(rmsg *model.WebSocketEvent) {
logger.Debugf("in handleWsActionUserRemoved rmsg: %#v\n", rmsg)
logger.Debugf("in handleWsActionUserRemoved rmsg.Broadcast: %#v\n", rmsg.Broadcast)
if rmsg.Broadcast == nil {
return
}
channelId, ok := rmsg.Data["channel_id"].(string)
if !ok {
return
}
userId := rmsg.Broadcast.UserId
ch := u.Srv.Channel(channelId)
// remove ourselves from the channel
if userId == u.mc.User.Id {
ch.Part(u, "")
return
}
ghost := u.createMMUser(u.mc.GetUser(userId))
if ghost == nil {
logger.Debugf("couldn't remove user %s (%s)", userId, u.mc.GetUser(userId).Username)
return
}
ch.Part(ghost, "")
}
func (u *User) handleWsActionUserAdded(rmsg *model.WebSocketEvent) {
userId, ok := rmsg.Data["user_id"].(string)
if !ok {
return
}
// add ourselves to the channel
if userId == u.mc.User.Id {
if u.mc.GetChannelName(rmsg.Broadcast.ChannelId) == "" {
u.mc.UpdateChannels()
}
logger.Debugf("ACTION_USER_ADDED adding myself to %s (%s)", u.mc.GetChannelName(rmsg.Broadcast.ChannelId), rmsg.Broadcast.ChannelId)
u.syncMMChannel(rmsg.Broadcast.ChannelId, u.mc.GetChannelName(rmsg.Broadcast.ChannelId))
ch := u.Srv.Channel(rmsg.Broadcast.ChannelId)
ch.Join(u)
return
}
u.addUserToChannel(u.mc.GetUser(userId), "#"+u.mc.GetChannelName(rmsg.Broadcast.ChannelId), rmsg.Broadcast.ChannelId)
}
func (u *User) handleWsActionChannelCreated(rmsg *model.WebSocketEvent) {
channelId, ok := rmsg.Data["channel_id"].(string)
if !ok {
return
}
u.mc.UpdateChannels()
logger.Debugf("ACTION_CHANNEL_CREATED adding myself to %s (%s)", u.mc.GetChannelName(channelId), channelId)
u.syncMMChannel(channelId, u.mc.GetChannelName(channelId))
}
func (u *User) handleWsActionChannelDeleted(rmsg *model.WebSocketEvent) {
channelId, ok := rmsg.Data["channel_id"].(string)
if !ok {
return
}
ch := u.Srv.Channel(channelId)
// remove ourselves from the channel
logger.Debugf("ACTION_CHANNEL_DELETED removing myself from %s (%s)", u.mc.GetChannelName(channelId), channelId)
ch.Part(u, "")
}
func (u *User) checkWsActionMessage(rmsg *model.WebSocketEvent, throttle <-chan time.Time) {
if u.mc.GetChannelName(rmsg.Broadcast.ChannelId) == "" {
select {
case <-throttle:
logger.Debugf("Updating channels for %#v", rmsg.Broadcast)
go u.mc.UpdateChannels()
default:
}
}
if rmsg.Data == nil {
return
}
}
func (u *User) MsgUser(toUser *User, msg string) {
u.Encode(&irc.Message{
Prefix: toUser.Prefix(),
Command: irc.PRIVMSG,
Params: []string{u.Nick},
Trailing: msg,
})
}
func (u *User) MsgSpoofUser(sender *User, rcvuser string, msg string) {
msg = wordwrap.String(msg, 440)
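// (IRC limits a message line to 512 bytes including command and CRLF, so
// wrapping the payload at 440 leaves headroom for the prefix and params)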
lines := strings.Split(msg, "\n")
for _, l := range lines {
l = strings.TrimSpace(l)
if len(l) == 0 {
continue
}
u.Encode(&irc.Message{
Prefix: &irc.Prefix{Name: sender.Nick, User: sender.Nick, Host: sender.Host},
Command: irc.PRIVMSG,
Params: []string{rcvuser},
Trailing: l + "\n",
})
}
}
// sync IRC with mattermost channel state
func (u *User) syncMMChannel(id string, name string) {
srv := u.Srv
idx := 0
max := 200
var mmusers []*model.User
mmusersPaged, resp := u.mc.Client.GetUsersInChannel(id, idx, max, "")
if resp.Error != nil {
return
}
for len(mmusersPaged) > 0 {
mmusersPaged, resp = u.mc.Client.GetUsersInChannel(id, idx, max, "")
if resp.Error != nil {
return
}
idx++
time.Sleep(time.Millisecond * 200)
mmusers = append(mmusers, mmusersPaged...)
}
logger.Debugf("found %d users in channel %s", len(mmusers), name)
for _, user := range mmusers {
if user.Id != u.mc.User.Id {
u.addUserToChannel(user, "#"+name, id)
}
}
// before joining ourselves
for _, user := range mmusers {
// join all the channels we're on on MM
if user.Id == u.mc.User.Id {
ch := srv.Channel(id)
// only join when we're not yet on the channel
if !ch.HasUser(u) {
logger.Debugf("syncMMChannel adding myself to %s (id: %s)", name, id)
if !stringInSlice(ch.String(), u.Cfg.JoinExclude) {
if len(u.Cfg.JoinInclude) > 0 {
if stringInSlice(ch.String(), u.Cfg.JoinInclude) {
ch.Join(u)
}
} else {
ch.Join(u)
}
}
svc, _ := srv.HasUser("mattermost")
ch.Topic(svc, u.mc.GetChannelHeader(id))
}
break
}
}
}
func (u *User) isValidMMServer(server string) bool {
if len(u.Cfg.AllowedServers) > 0 {
logger.Debugf("allowedservers: %s", u.Cfg.AllowedServers)
for _, srv := range u.Cfg.AllowedServers {
if srv == server {
return true
}
}
return false
}
return true
}
// antiIdle updates lastviewed every 60 seconds so that the user is shown as online instead of away
func (u *User) antiIdle(channelId string) {
ticker := time.NewTicker(time.Second * 60)
for {
select {
case <-u.idleStop:
logger.Debug("stopping antiIdle loop")
return
case <-ticker.C:
u.mc.UpdateLastViewed(channelId)
}
}
}
| {
u := NewUser(&conn{
Conn: c,
Encoder: irc.NewEncoder(c),
Decoder: irc.NewDecoder(c),
})
u.Srv = srv
u.MmInfo.Cfg = cfg
u.MmInfo.Cfg.AllowedServers = cfg.MattermostSettings.Restrict
u.MmInfo.Cfg.DefaultServer = cfg.MattermostSettings.DefaultServer
u.MmInfo.Cfg.DefaultTeam = cfg.MattermostSettings.DefaultTeam
u.MmInfo.Cfg.JoinInclude = cfg.MattermostSettings.JoinInclude
u.MmInfo.Cfg.JoinExclude = cfg.MattermostSettings.JoinExclude
u.MmInfo.Cfg.PartFake = cfg.MattermostSettings.PartFake
u.MmInfo.Cfg.Insecure = cfg.MattermostSettings.Insecure
u.MmInfo.Cfg.SkipTLSVerify = cfg.MattermostSettings.SkipTLSVerify
u.MmInfo.Cfg.PrefixMainTeam = cfg.MattermostSettings.PrefixMainTeam
u.MmInfo.Cfg.DisableAutoView = cfg.MattermostSettings.DisableAutoView
u.idleStop = make(chan struct{})
// used for login
u.createService("mattermost", "loginservice")
u.createService("slack", "loginservice")
return u
} |
device_data.ts | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {Injectable} from '@angular/core';
import {BehaviorSubject} from 'rxjs/BehaviorSubject';
import {Observable} from 'rxjs/Observable';
import {tap} from 'rxjs/operators';
import {Device} from '../../models/device';
import {DeviceApiParams} from '../../models/device';
import {DeviceService} from '../../services/device';
/** Device data class to be displayed in a mat-table for device_list_table. */
@Injectable()
export class DeviceData {
/** Data that the backend streams to the frontend. */
dataChange = new BehaviorSubject<Device[]>([]);
constructor(private readonly deviceService: DeviceService) {
this.refresh();
}
/** Property to return a list of devices based on streamed data. */
get data(): Device[] {
return this.dataChange.value;
}
/**
* Updates the device data from the DeviceService.
* @param filters The device api filters.
*/
refresh(filters: DeviceApiParams = {}): Observable<Device[]> {
return this.deviceService.list(filters).pipe(tap(devices => {
this.dataChange.next(devices);
})); | * Clears the device data.
*/
clearData() {
this.dataChange.next([]);
}
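// Usage sketch for a consuming component (`enrolled` is a hypothetical
// filter key on DeviceApiParams):
//   this.deviceData.dataChange.subscribe(devices => this.rows = devices);
//   this.deviceData.refresh({enrolled: true}).subscribe();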
} | }
/** |
resource_aws_ecr_lifecycle_policy_test.go | package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func TestAccAWSEcrLifecyclePolicy_basic(t *testing.T) {
randString := acctest.RandString(10)
rName := fmt.Sprintf("tf-acc-test-lifecycle-%s", randString)
resourceName := "aws_ecr_lifecycle_policy.test"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEcrLifecyclePolicyDestroy,
Steps: []resource.TestStep{
{
Config: testAccEcrLifecyclePolicyConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSEcrLifecyclePolicyExists(resourceName),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func testAccCheckAWSEcrLifecyclePolicyDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).ecrconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_ecr_lifecycle_policy" {
continue
}
input := &ecr.GetLifecyclePolicyInput{
RepositoryName: aws.String(rs.Primary.ID),
}
_, err := conn.GetLifecyclePolicy(input)
if err != nil {
if isAWSErr(err, ecr.ErrCodeRepositoryNotFoundException, "") {
return nil
}
if isAWSErr(err, ecr.ErrCodeLifecyclePolicyNotFoundException, "") {
return nil
}
return err
}
}
return nil
}
func testAccCheckAWSEcrLifecyclePolicyExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
conn := testAccProvider.Meta().(*AWSClient).ecrconn
input := &ecr.GetLifecyclePolicyInput{
RepositoryName: aws.String(rs.Primary.ID),
}
_, err := conn.GetLifecyclePolicy(input)
return err
} | }
func testAccEcrLifecyclePolicyConfig(rName string) string {
return fmt.Sprintf(`
resource "aws_ecr_repository" "test" {
name = "%s"
}
resource "aws_ecr_lifecycle_policy" "test" {
repository = aws_ecr_repository.test.name
policy = <<EOF
{
"rules": [
{
"rulePriority": 1,
"description": "Expire images older than 14 days",
"selection": {
"tagStatus": "untagged",
"countType": "sinceImagePushed",
"countUnit": "days",
"countNumber": 14
},
"action": {
"type": "expire"
}
}
]
}
EOF
}
`, rName)
} | |
widget.py | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, List, Optional, TYPE_CHECKING, Union
from .utils import snowflake_time, _get_as_snowflake, resolve_invite
from .user import BaseUser
from .activity import Activity, BaseActivity, Spotify, create_activity
from .invite import Invite
from .enums import Status, try_enum
if TYPE_CHECKING:
import datetime
from .state import ConnectionState
from .types.widget import (
WidgetMember as WidgetMemberPayload,
Widget as WidgetPayload,
)
__all__ = (
'WidgetChannel',
'WidgetMember',
'Widget',
)
class WidgetChannel:
"""Represents a "partial" widget channel.
.. container:: operations
.. describe:: x == y
Checks if two partial channels are the same.
.. describe:: x != y
Checks if two partial channels are not the same.
.. describe:: hash(x)
Return the partial channel's hash.
.. describe:: str(x)
Returns the partial channel's name.
Attributes
-----------
id: :class:`int`
The channel's ID.
name: :class:`str`
The channel's name.
position: :class:`int`
        The channel's position.
"""
__slots__ = ('id', 'name', 'position')
def __init__(self, id: int, name: str, position: int) -> None:
self.id: int = id
self.name: str = name
self.position: int = position
def __str__(self) -> str:
|
def __repr__(self) -> str:
return f'<WidgetChannel id={self.id} name={self.name!r} position={self.position!r}>'
@property
def mention(self) -> str:
""":class:`str`: The string that allows you to mention the channel."""
return f'<#{self.id}>'
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return snowflake_time(self.id)
class WidgetMember(BaseUser):
"""Represents a "partial" member of the widget's guild.
.. container:: operations
.. describe:: x == y
Checks if two widget members are the same.
.. describe:: x != y
Checks if two widget members are not the same.
.. describe:: hash(x)
Return the widget member's hash.
.. describe:: str(x)
Returns the widget member's `name#discriminator`.
Attributes
-----------
id: :class:`int`
The member's ID.
name: :class:`str`
The member's username.
discriminator: :class:`str`
The member's discriminator.
bot: :class:`bool`
Whether the member is a bot.
status: :class:`Status`
The member's status.
nick: Optional[:class:`str`]
The member's nickname.
avatar: Optional[:class:`str`]
The member's avatar hash.
activity: Optional[Union[:class:`BaseActivity`, :class:`Spotify`]]
The member's activity.
deafened: Optional[:class:`bool`]
Whether the member is currently deafened.
muted: Optional[:class:`bool`]
Whether the member is currently muted.
suppress: Optional[:class:`bool`]
Whether the member is currently being suppressed.
connected_channel: Optional[:class:`WidgetChannel`]
Which channel the member is connected to.
"""
__slots__ = ('name', 'status', 'nick', 'avatar', 'discriminator',
'id', 'bot', 'activity', 'deafened', 'suppress', 'muted',
'connected_channel')
if TYPE_CHECKING:
activity: Optional[Union[BaseActivity, Spotify]]
def __init__(
self,
*,
state: ConnectionState,
data: WidgetMemberPayload,
connected_channel: Optional[WidgetChannel] = None
) -> None:
super().__init__(state=state, data=data)
self.nick: Optional[str] = data.get('nick')
self.status: Status = try_enum(Status, data.get('status'))
self.deafened: Optional[bool] = data.get('deaf', False) or data.get('self_deaf', False)
self.muted: Optional[bool] = data.get('mute', False) or data.get('self_mute', False)
self.suppress: Optional[bool] = data.get('suppress', False)
try:
game = data['game']
except KeyError:
activity = None
else:
activity = create_activity(game)
self.activity: Optional[Union[BaseActivity, Spotify]] = activity
self.connected_channel: Optional[WidgetChannel] = connected_channel
def __repr__(self) -> str:
return (
f"<WidgetMember name={self.name!r} discriminator={self.discriminator!r}"
f" bot={self.bot} nick={self.nick!r}>"
)
@property
def display_name(self) -> str:
""":class:`str`: Returns the member's display name."""
return self.nick or self.name
class Widget:
"""Represents a :class:`Guild` widget.
.. container:: operations
.. describe:: x == y
Checks if two widgets are the same.
.. describe:: x != y
Checks if two widgets are not the same.
.. describe:: str(x)
Returns the widget's JSON URL.
Attributes
-----------
id: :class:`int`
The guild's ID.
name: :class:`str`
The guild's name.
channels: List[:class:`WidgetChannel`]
The accessible voice channels in the guild.
members: List[:class:`Member`]
The online members in the server. Offline members
do not appear in the widget.
.. note::
            Due to a Discord limitation, if this data is available
            the users are "anonymized": their IDs are sequential placeholders
            and their discriminators are incorrect. Likewise, the number of
            members retrieved is capped.
"""
__slots__ = ('_state', 'channels', '_invite', 'id', 'members', 'name')
def __init__(self, *, state: ConnectionState, data: WidgetPayload) -> None:
self._state = state
self._invite = data['instant_invite']
self.name: str = data['name']
self.id: int = int(data['id'])
self.channels: List[WidgetChannel] = []
for channel in data.get('channels', []):
_id = int(channel['id'])
self.channels.append(WidgetChannel(id=_id, name=channel['name'], position=channel['position']))
self.members: List[WidgetMember] = []
channels = {channel.id: channel for channel in self.channels}
for member in data.get('members', []):
connected_channel = _get_as_snowflake(member, 'channel_id')
if connected_channel in channels:
connected_channel = channels[connected_channel] # type: ignore
elif connected_channel:
connected_channel = WidgetChannel(id=connected_channel, name='', position=0)
self.members.append(WidgetMember(state=self._state, data=member, connected_channel=connected_channel)) # type: ignore
def __str__(self) -> str:
return self.json_url
def __eq__(self, other: Any) -> bool:
return self.id == other.id if isinstance(other, Widget) else False
def __repr__(self) -> str:
return f'<Widget id={self.id} name={self.name!r} invite_url={self.invite_url!r}>'
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the member's creation time in UTC."""
return snowflake_time(self.id)
@property
def json_url(self) -> str:
""":class:`str`: The JSON URL of the widget."""
return f"https://discord.com/api/guilds/{self.id}/widget.json"
@property
def invite_url(self) -> str:
"""Optional[:class:`str`]: The invite URL for the guild, if available."""
return self._invite
async def fetch_invite(self, *, with_counts: bool = True) -> Invite:
"""|coro|
Retrieves an :class:`Invite` from the widget's invite URL.
This is the same as :meth:`Client.fetch_invite`; the invite
code is abstracted away.
Parameters
-----------
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`Invite.approximate_member_count` and :attr:`Invite.approximate_presence_count`
fields.
Returns
--------
:class:`Invite`
The invite from the widget's invite URL.
"""
invite_id = resolve_invite(self._invite)
data = await self._state.http.get_invite(invite_id, with_counts=with_counts)
return Invite.from_incomplete(state=self._state, data=data)
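        # Hypothetical usage sketch (assumes a connected Client instance and a
        # guild with the widget enabled):
        #
        #     widget = await client.fetch_widget(guild_id)
        #     invite = await widget.fetch_invite(with_counts=True)
        #     print(invite.approximate_member_count)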
| return self.name |
commodity.component.ts | import { Component, EventEmitter } from '@angular/core';
import { Router } from '@angular/router';
import { ViewCommodityComponent } from './../viewCommodity/index';
import { EditCommodityComponent } from './../editCommodity/index';
import { Commodity } from '../_models/index';
import { AlertService, CommodityService } from '../_services/index';
@Component({
moduleId: module.id.toString(),
templateUrl: 'commodity.component.html',
styles: ['.pagination { margin: 0px !important; }']
})
export class CommodityComponent {
model: any = {};
loading = false;
sampleData : string ="some commodity component data";
commodities: Commodity[];
// pagination
filteredItems : Commodity[];
pages : number = 4;
pageSize : number = 5;
pageNumber : number = 0;
currentIndex : number = 1;
items: Commodity[];
pagesIndex : Array<number>;
pageStart : number = 1;
inputName : string = '';
constructor(private router: Router, private commodityService: CommodityService,
private alertService: AlertService) {
this.getAllCommodities();
// pagination
this.filteredItems = this.commodities;
this.init();
}
  getAllCommodities() {
    this.loading = true;
    this.commodityService.getAll().subscribe(commodities => {
      // Refresh the filtered view and pagination once the data arrives.
      this.commodities = commodities;
      this.filteredItems = commodities;
      this.init();
      this.loading = false;
    });
  }
viewItem(event: any) {
console.log('value:', event.id);
localStorage.removeItem("selectedCommodity");
localStorage.setItem("selectedCommodity", JSON.stringify(event));
this.router.navigate(["/viewCommodity"]);
}
editItem(event: any) {
console.log('value:', event.id);
localStorage.removeItem("selectedCommodity");
localStorage.setItem("selectedCommodity", JSON.stringify(event));
this.router.navigate(["/editCommodity"]);
}
deleteItem(event: any) {
console.log('value:', event.id);
this.commodityService.delete(event);
this.commodityService.getAll().subscribe(
commodities => { this.commodities = commodities; });
}
// pagination
  init() {
    this.currentIndex = 1;
    this.pageStart = 1;
    this.pages = 4;
    if (this.filteredItems && this.filteredItems.length) {
      // Total pages is the ceiling of item count / page size.
      this.pageNumber = Math.ceil(this.filteredItems.length / this.pageSize);
    }
    if (this.pageNumber < this.pages) {
      this.pages = this.pageNumber;
    }
    this.refreshItems();
  }
FilterByName(){
this.filteredItems = [];
if(this.inputName != ""){
this.commodities.forEach(element => {
if(element.name.toUpperCase().indexOf(this.inputName.toUpperCase())>=0){
this.filteredItems.push(element);
}
});
}else{
this.filteredItems = this.commodities;
}
console.log(this.filteredItems);
this.init();
}
fillArray(): any{
var obj = new Array();
for(var index = this.pageStart; index< this.pageStart + this.pages; index ++) {
obj.push(index);
}
return obj;
}
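  // fillArray() materialises the sliding window of visible page numbers,
  // e.g. pageStart = 3 with pages = 4 yields [3, 4, 5, 6].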
refreshItems(){
if(this.filteredItems && this.filteredItems.slice) {
this.items = this.filteredItems.slice((this.currentIndex - 1)*this.pageSize, (this.currentIndex) * this.pageSize);
this.pagesIndex = this.fillArray();
}
}
prevPage(){
if(this.currentIndex>1){
this.currentIndex --;
}
if(this.currentIndex < this.pageStart){
this.pageStart = this.currentIndex;
}
this.refreshItems();
}
nextPage(){
if(this.currentIndex < this.pageNumber){
this.currentIndex ++;
}
if(this.currentIndex >= (this.pageStart + this.pages)){
this.pageStart = this.currentIndex - this.pages + 1;
}
| setPage(index : number){
this.currentIndex = index;
this.refreshItems();
}
} | this.refreshItems();
} |
child-1.component.ts | import { Component } from '@angular/core';
import { OnDestroy$, takeUntilDestroyed } from '@pdtec/ngx-observable-lifecycle';
import { ObservableService } from './observable.service';
@Component({
selector: 'app-child-1',
template: '<div>Child 1 Value: {{value}}</div>',
})
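// A sketch of the observable-lifecycle pattern: extending OnDestroy$ lets
// takeUntilDestroyed(this) complete the subscription when Angular destroys
// the component, so no manual ngOnDestroy/unsubscribe bookkeeping is needed.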
export class Child1Component extends OnDestroy$ { |
constructor(service: ObservableService) {
super();
service.value$
.pipe(takeUntilDestroyed(this))
.subscribe(x => this.value = x);
}
} | public value: number | undefined; |
obj.go | // Inferno utils/6l/obj.c
// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth ([email protected])
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth ([email protected])
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package main
import (
"cmd/internal/ld"
"cmd/internal/obj"
"fmt"
"log"
)
// Reading object files.
func main() {
linka | inkarchinit() {
ld.Thestring = "amd64"
ld.Thelinkarch = &ld.Linkamd64
if obj.Getgoarch() == "amd64p32" {
ld.Thelinkarch = &ld.Linkamd64p32
}
ld.Thearch.Thechar = thechar
ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize
ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize
ld.Thearch.Regsize = ld.Thelinkarch.Regsize
ld.Thearch.Funcalign = FuncAlign
ld.Thearch.Maxalign = MaxAlign
ld.Thearch.Minlc = MINLC
ld.Thearch.Dwarfregsp = DWARFREGSP
ld.Thearch.Adddynlib = adddynlib
ld.Thearch.Adddynrel = adddynrel
ld.Thearch.Adddynsym = adddynsym
ld.Thearch.Archinit = archinit
ld.Thearch.Archreloc = archreloc
ld.Thearch.Archrelocvariant = archrelocvariant
ld.Thearch.Asmb = asmb
ld.Thearch.Elfreloc1 = elfreloc1
ld.Thearch.Elfsetupplt = elfsetupplt
ld.Thearch.Gentext = gentext
ld.Thearch.Machoreloc1 = machoreloc1
ld.Thearch.PEreloc1 = pereloc1
ld.Thearch.Lput = ld.Lputl
ld.Thearch.Wput = ld.Wputl
ld.Thearch.Vput = ld.Vputl
ld.Thearch.Linuxdynld = "/lib64/ld-linux-x86-64.so.2"
ld.Thearch.Freebsddynld = "/libexec/ld-elf.so.1"
ld.Thearch.Openbsddynld = "/usr/libexec/ld.so"
ld.Thearch.Netbsddynld = "/libexec/ld.elf_so"
ld.Thearch.Dragonflydynld = "/usr/libexec/ld-elf.so.2"
ld.Thearch.Solarisdynld = "/lib/amd64/ld.so.1"
}
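// archinit, below, finishes per-OS setup: for each supported HEADTYPE it
// picks the header size and the default text/data addresses and rounding
// (INITTEXT, INITDAT, INITRND), honoring any command-line overrides
// (historically the -T, -D and -R flags).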
func archinit() {
// getgoextlinkenabled is based on GO_EXTLINK_ENABLED when
// Go was built; see ../../make.bash.
if ld.Linkmode == ld.LinkAuto && obj.Getgoextlinkenabled() == "0" {
ld.Linkmode = ld.LinkInternal
}
if ld.Flag_shared != 0 {
ld.Linkmode = ld.LinkExternal
}
switch ld.HEADTYPE {
default:
if ld.Linkmode == ld.LinkAuto {
ld.Linkmode = ld.LinkInternal
}
if ld.Linkmode == ld.LinkExternal && obj.Getgoextlinkenabled() != "1" {
log.Fatalf("cannot use -linkmode=external with -H %s", ld.Headstr(int(ld.HEADTYPE)))
}
case ld.Hdarwin,
ld.Hdragonfly,
ld.Hfreebsd,
ld.Hlinux,
ld.Hnacl,
ld.Hnetbsd,
ld.Hopenbsd,
ld.Hsolaris,
ld.Hwindows:
break
}
switch ld.HEADTYPE {
default:
ld.Diag("unknown -H option")
ld.Errorexit()
fallthrough
case ld.Hplan9: /* plan 9 */
ld.HEADR = 32 + 8
if ld.INITTEXT == -1 {
ld.INITTEXT = 0x200000 + int64(ld.HEADR)
}
if ld.INITDAT == -1 {
ld.INITDAT = 0
}
if ld.INITRND == -1 {
ld.INITRND = 0x200000
}
case ld.Helf: /* elf32 executable */
ld.HEADR = int32(ld.Rnd(52+3*32, 16))
if ld.INITTEXT == -1 {
ld.INITTEXT = 0x80110000
}
if ld.INITDAT == -1 {
ld.INITDAT = 0
}
if ld.INITRND == -1 {
ld.INITRND = 4096
}
case ld.Hdarwin: /* apple MACH */
ld.Machoinit()
ld.HEADR = ld.INITIAL_MACHO_HEADR
if ld.INITRND == -1 {
ld.INITRND = 4096
}
if ld.INITTEXT == -1 {
ld.INITTEXT = 4096 + int64(ld.HEADR)
}
if ld.INITDAT == -1 {
ld.INITDAT = 0
}
case ld.Hlinux, /* elf64 executable */
ld.Hfreebsd, /* freebsd */
ld.Hnetbsd, /* netbsd */
ld.Hopenbsd, /* openbsd */
ld.Hdragonfly, /* dragonfly */
ld.Hsolaris: /* solaris */
ld.Elfinit()
ld.HEADR = ld.ELFRESERVE
if ld.INITTEXT == -1 {
ld.INITTEXT = (1 << 22) + int64(ld.HEADR)
}
if ld.INITDAT == -1 {
ld.INITDAT = 0
}
if ld.INITRND == -1 {
ld.INITRND = 4096
}
case ld.Hnacl:
ld.Elfinit()
ld.Debug['w']++ // disable dwarf, which gets confused and is useless anyway
ld.HEADR = 0x10000
ld.Funcalign = 32
if ld.INITTEXT == -1 {
ld.INITTEXT = 0x20000
}
if ld.INITDAT == -1 {
ld.INITDAT = 0
}
if ld.INITRND == -1 {
ld.INITRND = 0x10000
}
case ld.Hwindows: /* PE executable */
ld.Peinit()
ld.HEADR = ld.PEFILEHEADR
if ld.INITTEXT == -1 {
ld.INITTEXT = ld.PEBASE + int64(ld.PESECTHEADR)
}
if ld.INITDAT == -1 {
ld.INITDAT = 0
}
if ld.INITRND == -1 {
ld.INITRND = ld.PESECTALIGN
}
}
if ld.INITDAT != 0 && ld.INITRND != 0 {
fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(ld.INITDAT), uint32(ld.INITRND))
}
}
| rchinit()
ld.Ldmain()
}
func l |
git_cl_test.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_cl.py."""
import contextlib
import datetime
import json
import logging
import os
import StringIO
import sys
import tempfile
import unittest
import urlparse
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.auto_stub import TestCase
import metrics
# We have to disable monitoring before importing git_cl.
metrics.DISABLE_METRICS_COLLECTION = True
import gerrit_util
import git_cl
import git_common
import git_footers
import subprocess2
def callError(code=1, cmd='', cwd='', stdout='', stderr=''):
return subprocess2.CalledProcessError(code, cmd, cwd, stdout, stderr)
CERR1 = callError(1)
def MakeNamedTemporaryFileMock(expected_content):
class NamedTemporaryFileMock(object):
def __init__(self, *args, **kwargs):
self.name = '/tmp/named'
self.expected_content = expected_content
def __enter__(self):
return self
def __exit__(self, _type, _value, _tb):
pass
def write(self, content):
if self.expected_content:
assert content == self.expected_content
def close(self):
pass
return NamedTemporaryFileMock
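# The upload path writes the commit message to a NamedTemporaryFile and then
# runs 'git commit-tree ... -F /tmp/named'; this mock only asserts that the
# expected content gets written (see _gerrit_upload_calls below).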
class ChangelistMock(object):
# A class variable so we can access it when we don't have access to the
# instance that's being set.
desc = ""
def __init__(self, **kwargs):
pass
def GetIssue(self):
return 1
def GetDescription(self, force=False):
return ChangelistMock.desc
def UpdateDescription(self, desc, force=False):
ChangelistMock.desc = desc
class PresubmitMock(object):
def __init__(self, *args, **kwargs):
self.reviewers = []
self.more_cc = ['[email protected]']
@staticmethod
def should_continue():
return True
class GitCheckoutMock(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def reset():
GitCheckoutMock.conflict = False
def apply_patch(self, p):
if GitCheckoutMock.conflict:
raise Exception('failed')
class WatchlistsMock(object):
def __init__(self, _):
pass
@staticmethod
def GetWatchersForPaths(_):
return ['[email protected]']
class CodereviewSettingsFileMock(object):
def __init__(self):
pass
# pylint: disable=no-self-use
def read(self):
return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" +
"GERRIT_HOST: True\n")
class AuthenticatorMock(object):
def __init__(self, *_args):
pass
def has_cached_credentials(self):
return True
def authorize(self, http):
return http
def CookiesAuthenticatorMockFactory(hosts_with_creds=None, same_auth=False):
"""Use to mock Gerrit/Git credentials from ~/.netrc or ~/.gitcookies.
Usage:
>>> self.mock(git_cl.gerrit_util, "CookiesAuthenticator",
CookiesAuthenticatorMockFactory({'host': ('user', _, 'pass')})
OR
>>> self.mock(git_cl.gerrit_util, "CookiesAuthenticator",
CookiesAuthenticatorMockFactory(
same_auth=('user', '', 'pass'))
"""
class CookiesAuthenticatorMock(git_cl.gerrit_util.CookiesAuthenticator):
def __init__(self): # pylint: disable=super-init-not-called
# Intentionally not calling super() because it reads actual cookie files.
pass
@classmethod
def get_gitcookies_path(cls):
return '~/.gitcookies'
@classmethod
def get_netrc_path(cls):
return '~/.netrc'
def _get_auth_for_host(self, host):
if same_auth:
return same_auth
return (hosts_with_creds or {}).get(host)
return CookiesAuthenticatorMock
class MockChangelistWithBranchAndIssue():
def __init__(self, branch, issue):
self.branch = branch
self.issue = issue
def GetBranch(self):
return self.branch
def GetIssue(self):
return self.issue
class SystemExitMock(Exception):
pass
class TestGitClBasic(unittest.TestCase):
def test_get_description(self):
cl = git_cl.Changelist(issue=1, codereview='gerrit',
codereview_host='host')
cl.description = 'x'
cl.has_description = True
cl._codereview_impl.FetchDescription = lambda *a, **kw: 'y'
self.assertEquals(cl.GetDescription(), 'x')
self.assertEquals(cl.GetDescription(force=True), 'y')
self.assertEquals(cl.GetDescription(), 'y')
def test_description_footers(self):
cl = git_cl.Changelist(issue=1, codereview='gerrit',
codereview_host='host')
cl.description = '\n'.join([
'This is some message',
'',
'It has some lines',
'and, also',
'',
'Some: Really',
'Awesome: Footers',
])
cl.has_description = True
cl._codereview_impl.UpdateDescriptionRemote = lambda *a, **kw: 'y'
msg, footers = cl.GetDescriptionFooters()
self.assertEquals(
msg, ['This is some message', '', 'It has some lines', 'and, also'])
self.assertEquals(footers, [('Some', 'Really'), ('Awesome', 'Footers')])
msg.append('wut')
footers.append(('gnarly-dude', 'beans'))
cl.UpdateDescriptionFooters(msg, footers)
self.assertEquals(cl.GetDescription().splitlines(), [
'This is some message',
'',
'It has some lines',
'and, also',
      'wut',
'',
'Some: Really',
'Awesome: Footers',
'Gnarly-Dude: beans',
])
def test_get_bug_line_values(self):
f = lambda p, bugs: list(git_cl._get_bug_line_values(p, bugs))
self.assertEqual(f('', ''), [])
self.assertEqual(f('', '123,v8:456'), ['123', 'v8:456'])
self.assertEqual(f('v8', '456'), ['v8:456'])
self.assertEqual(f('v8', 'chromium:123,456'), ['v8:456', 'chromium:123'])
    # Not nice, but not worth caring about.
self.assertEqual(f('v8', 'chromium:123,456,v8:123'),
['v8:456', 'chromium:123', 'v8:123'])
def _test_git_number(self, parent_msg, dest_ref, child_msg,
parent_hash='parenthash'):
desc = git_cl.ChangeDescription(child_msg)
desc.update_with_git_number_footers(parent_hash, parent_msg, dest_ref)
return desc.description
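  # For example, a parent footer 'Cr-Commit-Position: refs/heads/master@{#12}'
  # with dest_ref 'refs/heads/master' should produce '...master@{#13}' in the
  # child; see test_git_number_same_branch below.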
def assertEqualByLine(self, actual, expected):
self.assertEqual(actual.splitlines(), expected.splitlines())
def test_git_number_bad_parent(self):
with self.assertRaises(ValueError):
self._test_git_number('Parent', 'refs/heads/master', 'Child')
def test_git_number_bad_parent_footer(self):
with self.assertRaises(AssertionError):
self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: wrong',
'refs/heads/master', 'Child')
def test_git_number_bad_lineage_ignored(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#1}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}',
'refs/heads/master', 'Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#2}\n'
'Cr-Branched-From: mustBeReal40CharHash-branch@{#pos}')
def test_git_number_same_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_same_branch_mixed_footers(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child\n'
'\n'
'Broken-by: design\n'
'BUG=123')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Broken-by: design\n'
'BUG=123\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_same_branch_with_originals(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/master',
child_msg='Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Whatever: value\n'
'Cr-Commit-Position: refs/copy/paste@{#22}')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Some users are smart and insert their own footers\n'
'\n'
'Cr-Original-Whatever: value\n'
'Cr-Original-Commit-Position: refs/copy/paste@{#22}\n'
'Cr-Commit-Position: refs/heads/master@{#13}')
def test_git_number_new_branch(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/master@{#12}')
def test_git_number_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_moooooooore_lineage(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/mooore',
child_msg='Child')
self.assertEqualByLine(
actual,
'Child\n'
'\n'
'Cr-Commit-Position: refs/heads/mooore@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/branch@{#5}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_git_number_ever_moooooooore_lineage(self):
self.maxDiff = 10000 # pylint: disable=attribute-defined-outside-init
actual = self._test_git_number(
'CQ commit on fresh new branch + numbering.\n'
'\n'
'NOTRY=True\n'
'NOPRESUBMIT=True\n'
'BUG=\n'
'\n'
'Review-Url: https://codereview.chromium.org/2577703003\n'
'Cr-Commit-Position: refs/heads/gnumb-test/br@{#1}\n'
'Cr-Branched-From: 0749ff9edc-refs/heads/gnumb-test/cq@{#4}\n'
'Cr-Branched-From: 5c49df2da6-refs/heads/master@{#41618}',
dest_ref='refs/heads/gnumb-test/cl',
child_msg='git cl on fresh new branch + numbering.\n'
'\n'
'Review-Url: https://codereview.chromium.org/2575043003 .\n')
self.assertEqualByLine(
actual,
'git cl on fresh new branch + numbering.\n'
'\n'
'Review-Url: https://codereview.chromium.org/2575043003 .\n'
'Cr-Commit-Position: refs/heads/gnumb-test/cl@{#1}\n'
'Cr-Branched-From: parenthash-refs/heads/gnumb-test/br@{#1}\n'
'Cr-Branched-From: 0749ff9edc-refs/heads/gnumb-test/cq@{#4}\n'
'Cr-Branched-From: 5c49df2da6-refs/heads/master@{#41618}')
def test_git_number_cherry_pick(self):
actual = self._test_git_number(
'Parent\n'
'\n'
'Cr-Commit-Position: refs/heads/branch@{#1}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}',
dest_ref='refs/heads/branch',
child_msg='Child, which is cherry-pick from master\n'
'\n'
'Cr-Commit-Position: refs/heads/master@{#100}\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)')
self.assertEqualByLine(
actual,
'Child, which is cherry-pick from master\n'
'\n'
'(cherry picked from commit deadbeef12345678deadbeef12345678deadbeef)\n'
'\n'
'Cr-Original-Commit-Position: refs/heads/master@{#100}\n'
'Cr-Commit-Position: refs/heads/branch@{#2}\n'
'Cr-Branched-From: somehash-refs/heads/master@{#12}')
def test_gerrit_mirror_hack(self):
cr = 'chromium-review.googlesource.com'
url0 = 'https://%s/a/changes/x?a=b' % cr
origMirrors = git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES
try:
git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES = ['us1', 'us2']
url1 = git_cl.gerrit_util._UseGerritMirror(url0, cr)
url2 = git_cl.gerrit_util._UseGerritMirror(url1, cr)
url3 = git_cl.gerrit_util._UseGerritMirror(url2, cr)
self.assertNotEqual(url1, url2)
self.assertEqual(sorted((url1, url2)), [
'https://us1-mirror-chromium-review.googlesource.com/a/changes/x?a=b',
'https://us2-mirror-chromium-review.googlesource.com/a/changes/x?a=b'])
self.assertEqual(url1, url3)
finally:
git_cl.gerrit_util._GERRIT_MIRROR_PREFIXES = origMirrors
def test_valid_accounts(self):
mock_per_account = {
'u1': None, # 404, doesn't exist.
'u2': {
'_account_id': 123124,
'avatars': [],
'email': '[email protected]',
'name': 'User Number 2',
'status': 'OOO',
},
'u3': git_cl.gerrit_util.GerritError(500, 'retries didn\'t help :('),
}
def GetAccountDetailsMock(_, account):
      # A poor man's version of mock's side_effect.
v = mock_per_account.pop(account)
if isinstance(v, Exception):
raise v
return v
original = git_cl.gerrit_util.GetAccountDetails
try:
git_cl.gerrit_util.GetAccountDetails = GetAccountDetailsMock
actual = git_cl.gerrit_util.ValidAccounts(
'host', ['u1', 'u2', 'u3'], max_threads=1)
finally:
git_cl.gerrit_util.GetAccountDetails = original
self.assertEqual(actual, {
'u2': {
'_account_id': 123124,
'avatars': [],
'email': '[email protected]',
'name': 'User Number 2',
'status': 'OOO',
},
})
class TestParseIssueURL(unittest.TestCase):
def _validate(self, parsed, issue=None, patchset=None, hostname=None,
codereview=None, fail=False):
self.assertIsNotNone(parsed)
if fail:
self.assertFalse(parsed.valid)
return
self.assertTrue(parsed.valid)
self.assertEqual(parsed.issue, issue)
self.assertEqual(parsed.patchset, patchset)
self.assertEqual(parsed.hostname, hostname)
self.assertEqual(parsed.codereview, codereview)
def _run_and_validate(self, func, url, *args, **kwargs):
result = func(urlparse.urlparse(url))
if kwargs.pop('fail', False):
self.assertIsNone(result)
return None
self._validate(result, *args, fail=False, **kwargs)
def test_gerrit(self):
def test(url, *args, **kwargs):
self._run_and_validate(git_cl._GerritChangelistImpl.ParseIssueURL, url,
*args, codereview='gerrit', **kwargs)
test('http://chrome-review.source.com/c/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/#/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/123',
123, None, 'chrome-review.source.com')
test('https://chrome-review.source.com/123/4',
123, 4, 'chrome-review.source.com')
test('https://chrome-review.source.com/c/123/1/whatisthis', fail=True)
test('https://chrome-review.source.com/c/abc/', fail=True)
test('ssh://chrome-review.source.com/c/123/1/', fail=True)
def test_ParseIssueNumberArgument(self):
def test(arg, *args, **kwargs):
codereview_hint = kwargs.pop('hint', None)
self._validate(git_cl.ParseIssueNumberArgument(arg, codereview_hint),
*args, **kwargs)
test('123', 123)
test('', fail=True)
test('abc', fail=True)
test('123/1', fail=True)
test('123a', fail=True)
test('ssh://chrome-review.source.com/#/c/123/4/', fail=True)
    # Looks like both Rietveld and Gerrit, but Gerrit should be selected now,
    # with or without a hint.
test('https://codereview.source.com/123',
123, None, 'codereview.source.com', 'gerrit',
hint='gerrit')
test('https://codereview.source.com/123',
123, None, 'codereview.source.com', 'gerrit')
    # Gerrit.
test('https://chrome-review.source.com/c/123/4',
123, 4, 'chrome-review.source.com', 'gerrit')
test('https://chrome-review.source.com/bad/123/4', fail=True)
class GitCookiesCheckerTest(TestCase):
def setUp(self):
super(GitCookiesCheckerTest, self).setUp()
self.c = git_cl._GitCookiesChecker()
self.c._all_hosts = []
def mock_hosts_creds(self, subhost_identity_pairs):
def ensure_googlesource(h):
if not h.endswith(self.c._GOOGLESOURCE):
assert not h.endswith('.')
return h + '.' + self.c._GOOGLESOURCE
return h
self.c._all_hosts = [(ensure_googlesource(h), i, '.gitcookies')
for h, i in subhost_identity_pairs]
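  # e.g. mock_hosts_creds([('chromium', 'git-user.example.com')]) registers a
  # credential entry for chromium.googlesource.com.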
def test_identity_parsing(self):
self.assertEqual(self.c._parse_identity('ldap.google.com'),
('ldap', 'google.com'))
self.assertEqual(self.c._parse_identity('git-ldap.example.com'),
('ldap', 'example.com'))
    # Special case because we know there are no subdomains in chromium.org.
self.assertEqual(self.c._parse_identity('git-note.period.chromium.org'),
('note.period', 'chromium.org'))
# Pathological: ".period." can be either username OR domain, more likely
# domain.
self.assertEqual(self.c._parse_identity('git-note.period.example.com'),
('note', 'period.example.com'))
def test_analysis_nothing(self):
self.c._all_hosts = []
self.assertFalse(self.c.has_generic_host())
self.assertEqual(set(), self.c.get_conflicting_hosts())
self.assertEqual(set(), self.c.get_duplicated_hosts())
self.assertEqual(set(), self.c.get_partially_configured_hosts())
self.assertEqual(set(), self.c.get_hosts_with_wrong_identities())
def test_analysis(self):
self.mock_hosts_creds([
('.googlesource.com', 'git-example.chromium.org'),
('chromium', 'git-example.google.com'),
('chromium-review', 'git-example.google.com'),
('chrome-internal', 'git-example.chromium.org'),
('chrome-internal-review', 'git-example.chromium.org'),
('conflict', 'git-example.google.com'),
('conflict-review', 'git-example.chromium.org'),
('dup', 'git-example.google.com'),
('dup', 'git-example.google.com'),
('dup-review', 'git-example.google.com'),
('partial', 'git-example.google.com'),
('gpartial-review', 'git-example.google.com'),
])
self.assertTrue(self.c.has_generic_host())
self.assertEqual(set(['conflict.googlesource.com']),
self.c.get_conflicting_hosts())
self.assertEqual(set(['dup.googlesource.com']),
self.c.get_duplicated_hosts())
self.assertEqual(set(['partial.googlesource.com',
'gpartial-review.googlesource.com']),
self.c.get_partially_configured_hosts())
self.assertEqual(set(['chromium.googlesource.com',
'chrome-internal.googlesource.com']),
self.c.get_hosts_with_wrong_identities())
def test_report_no_problems(self):
self.test_analysis_nothing()
self.mock(sys, 'stdout', StringIO.StringIO())
self.assertFalse(self.c.find_and_report_problems())
self.assertEqual(sys.stdout.getvalue(), '')
def test_report(self):
self.test_analysis()
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.gerrit_util.CookiesAuthenticator, 'get_gitcookies_path',
classmethod(lambda _: '~/.gitcookies'))
self.assertTrue(self.c.find_and_report_problems())
with open(os.path.join(os.path.dirname(__file__),
'git_cl_creds_check_report.txt')) as f:
expected = f.read()
def by_line(text):
return [l.rstrip() for l in text.rstrip().splitlines()]
self.maxDiff = 10000 # pylint: disable=attribute-defined-outside-init
self.assertEqual(by_line(sys.stdout.getvalue().strip()), by_line(expected))
class TestGitCl(TestCase):
def setUp(self):
super(TestGitCl, self).setUp()
self.calls = []
self._calls_done = []
self.mock(git_cl, 'time_time',
lambda: self._mocked_call('time.time'))
self.mock(git_cl.metrics.collector, 'add_repeated',
lambda *a: self._mocked_call('add_repeated', *a))
self.mock(subprocess2, 'call', self._mocked_call)
self.mock(subprocess2, 'check_call', self._mocked_call)
self.mock(subprocess2, 'check_output', self._mocked_call)
self.mock(subprocess2, 'communicate',
lambda *a, **kw: ([self._mocked_call(*a, **kw), ''], 0))
self.mock(git_cl.gclient_utils, 'CheckCallAndFilter', self._mocked_call)
self.mock(git_common, 'is_dirty_git_tree', lambda x: False)
self.mock(git_common, 'get_or_create_merge_base',
lambda *a: (
self._mocked_call(['get_or_create_merge_base']+list(a))))
self.mock(git_cl, 'BranchExists', lambda _: True)
self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '')
self.mock(git_cl, 'SaveDescriptionBackup', lambda _:
self._mocked_call('SaveDescriptionBackup'))
self.mock(git_cl, 'ask_for_data', lambda *a, **k: self._mocked_call(
*(['ask_for_data'] + list(a)), **k))
self.mock(git_cl, 'write_json', lambda path, contents:
self._mocked_call('write_json', path, contents))
self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock)
self.mock(git_cl.checkout, 'GitCheckout', GitCheckoutMock)
GitCheckoutMock.reset()
self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock)
self.mock(git_cl.auth, 'get_authenticator_for_host', AuthenticatorMock)
self.mock(git_cl.gerrit_util, 'GetChangeDetail',
lambda *args, **kwargs: self._mocked_call(
'GetChangeDetail', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'GetChangeComments',
lambda *args, **kwargs: self._mocked_call(
'GetChangeComments', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'GetChangeRobotComments',
lambda *args, **kwargs: self._mocked_call(
'GetChangeRobotComments', *args, **kwargs))
self.mock(git_cl.gerrit_util, 'AddReviewers',
lambda h, i, reviewers, ccs, notify: self._mocked_call(
'AddReviewers', h, i, reviewers, ccs, notify))
self.mock(git_cl.gerrit_util, 'SetReview',
lambda h, i, msg=None, labels=None, notify=None:
self._mocked_call('SetReview', h, i, msg, labels, notify))
self.mock(git_cl.gerrit_util.LuciContextAuthenticator, 'is_luci',
staticmethod(lambda: False))
self.mock(git_cl.gerrit_util.GceAuthenticator, 'is_gce',
classmethod(lambda _: False))
self.mock(git_cl.gerrit_util, 'ValidAccounts',
lambda host, accounts:
self._mocked_call('ValidAccounts', host, accounts))
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call(['DieWithError', msg]))
    # It's important to reset settings to avoid interference between tests.
git_cl.settings = None
def tearDown(self):
try:
self.assertEquals([], self.calls)
except AssertionError:
if not self.has_failed():
raise
# Sadly, has_failed() returns True if this OR any other tests before this
# one have failed.
git_cl.logging.error(
'!!!!!! IF YOU SEE THIS, READ BELOW, IT WILL SAVE YOUR TIME !!!!!\n'
'There are un-consumed self.calls after this test has finished.\n'
'If you don\'t know which test this is, run:\n'
' tests/git_cl_tests.py -v\n'
'If you are already running only this test, then **first** fix the '
'problem whose exception is emitted below by unittest runner.\n'
'Else, to be sure what\'s going on, run this test **alone** with \n'
' tests/git_cl_tests.py TestGitCl.<name>\n'
'and follow instructions above.\n' +
'=' * 80)
finally:
super(TestGitCl, self).tearDown()
def _mocked_call(self, *args, **_kwargs):
self.assertTrue(
self.calls,
'@%d Expected: <Missing> Actual: %r' % (len(self._calls_done), args))
top = self.calls.pop(0)
expected_args, result = top
# Also logs otherwise it could get caught in a try/finally and be hard to
# diagnose.
if expected_args != args:
N = 5
prior_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) - N + i, c[0])
for i, c in enumerate(self._calls_done[-N:]))
following_calls = '\n '.join(
'@%d: %r' % (len(self._calls_done) + i + 1, c[0])
for i, c in enumerate(self.calls[:N]))
extended_msg = (
'A few prior calls:\n %s\n\n'
'This (expected):\n @%d: %r\n'
'This (actual):\n @%d: %r\n\n'
'A few following expected calls:\n %s' %
(prior_calls, len(self._calls_done), expected_args,
len(self._calls_done), args, following_calls))
git_cl.logging.error(extended_msg)
self.fail('@%d\n'
' Expected: %r\n'
' Actual: %r' % (
len(self._calls_done), expected_args, args))
self._calls_done.append(top)
if isinstance(result, Exception):
raise result
return result
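  # The mock protocol: each test primes self.calls with (expected_args,
  # result) pairs; every intercepted call pops the head of the queue, asserts
  # the arguments match, and returns (or raises) the canned result. tearDown
  # then verifies the whole queue was consumed.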
def test_ask_for_explicit_yes_true(self):
self.calls = [
(('ask_for_data', 'prompt [Yes/No]: '), 'blah'),
(('ask_for_data', 'Please, type yes or no: '), 'ye'),
]
self.assertTrue(git_cl.ask_for_explicit_yes('prompt'))
def test_LoadCodereviewSettingsFromFile_gerrit(self):
codereview_file = StringIO.StringIO('GERRIT_HOST: true')
self.calls = [
((['git', 'config', '--unset-all', 'rietveld.cc'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.tree-status-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.viewvc-url'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-regex'],), CERR1),
((['git', 'config', '--unset-all', 'rietveld.cpplint-ignore-regex'],),
CERR1),
((['git', 'config', '--unset-all', 'rietveld.run-post-upload-hook'],),
CERR1),
((['git', 'config', 'gerrit.host', 'true'],), ''),
]
self.assertIsNone(git_cl.LoadCodereviewSettingsFromFile(codereview_file))
@classmethod
def _is_gerrit_calls(cls, gerrit=False):
return [((['git', 'config', 'rietveld.autoupdate'],), ''),
((['git', 'config', 'gerrit.host'],), 'True' if gerrit else '')]
@classmethod
def _git_post_upload_calls(cls):
return [
((['git', 'rev-parse', 'HEAD'],), 'hash'),
((['git', 'symbolic-ref', 'HEAD'],), 'hash'),
((['git',
'config', 'branch.hash.last-upload-hash', 'hash'],), ''),
((['git', 'config', 'rietveld.run-post-upload-hook'],), ''),
]
@staticmethod
def _git_sanity_checks(diff_base, working_branch, get_remote_branch=True):
fake_ancestor = 'fake_ancestor'
fake_cl = 'fake_cl_for_patch'
return [
((['git',
'rev-parse', '--verify', diff_base],), fake_ancestor),
((['git',
'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor),
((['git',
'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl),
# Mock a config miss (error code 1)
((['git',
'config', 'gitcl.remotebranch'],), CERR1),
] + ([
# Call to GetRemoteBranch()
((['git',
'config', 'branch.%s.merge' % working_branch],),
'refs/heads/master'),
((['git',
'config', 'branch.%s.remote' % working_branch],), 'origin'),
] if get_remote_branch else []) + [
((['git', 'rev-list', '^' + fake_ancestor,
'refs/remotes/origin/master'],), ''),
]
@classmethod
def _gerrit_ensure_auth_calls(
cls, issue=None, skip_auth_check=False, short_hostname='chromium'):
cmd = ['git', 'config', '--bool', 'gerrit.skip-ensure-authenticated']
if skip_auth_check:
return [((cmd, ), 'true')]
calls = [((cmd, ), CERR1)]
if issue:
calls.extend([
((['git', 'config', 'branch.master.gerritserver'],), CERR1),
])
calls.extend([
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://%s.googlesource.com/my/repo' % short_hostname),
])
return calls
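  # The expectations above mirror _ensure_authenticated(): unless the
  # gerrit.skip-ensure-authenticated git config is set, git cl inspects the
  # branch's remote URL to work out which Gerrit host needs credentials.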
@classmethod
def _gerrit_base_calls(cls, issue=None, fetched_description=None,
fetched_status=None, other_cl_owner=None,
custom_cl_base=None, short_hostname='chromium'):
calls = cls._is_gerrit_calls(True)
calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],),
CERR1 if issue is None else str(issue)),
]
if custom_cl_base:
ancestor_revision = custom_cl_base
else:
# Determine ancestor_revision to be merge base.
ancestor_revision = 'fake_ancestor_sha'
calls += [
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],), ancestor_revision),
]
# Calls to verify branch point is ancestor
calls += cls._gerrit_ensure_auth_calls(
issue=issue, short_hostname=short_hostname)
if issue:
calls += [
(('GetChangeDetail', '%s-review.googlesource.com' % short_hostname,
'my%2Frepo~123456',
['DETAILED_ACCOUNTS', 'CURRENT_REVISION', 'CURRENT_COMMIT', 'LABELS']
),
{
'owner': {'email': (other_cl_owner or '[email protected]')},
'change_id': '123456789',
'current_revision': 'sha1_of_current_revision',
'revisions': { 'sha1_of_current_revision': {
'commit': {'message': fetched_description},
}},
'status': fetched_status or 'NEW',
}),
]
if fetched_status == 'ABANDONED':
calls += [
(('DieWithError', 'Change https://%s-review.googlesource.com/'
'123456 has been abandoned, new uploads are not '
'allowed' % short_hostname), SystemExitMock()),
]
return calls
if other_cl_owner:
calls += [
(('ask_for_data', 'Press Enter to upload, or Ctrl+C to abort'), ''),
]
calls += cls._git_sanity_checks(ancestor_revision, 'master',
get_remote_branch=False)
calls += [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'rev-parse', 'HEAD'],), '12345'),
((['git', '-c', 'core.quotePath=false', 'diff', '--name-status',
'--no-renames', '-r', ancestor_revision + '...', '.'],),
'M\t.gitignore\n'),
((['git', 'config', 'branch.master.gerritpatchset'],), CERR1),
]
if not issue:
calls += [
((['git', 'log', '--pretty=format:%s%n%n%b',
ancestor_revision + '...'],),
'foo'),
]
calls += [
((['git', 'config', 'user.email'],), '[email protected]'),
((['git', 'diff', '--no-ext-diff', '--stat', '-l100000', '-C50'] +
([custom_cl_base] if custom_cl_base else
[ancestor_revision, 'HEAD']),),
'+dat'),
]
return calls
@classmethod
def _gerrit_upload_calls(cls, description, reviewers, squash,
squash_mode='default',
expected_upstream_ref='origin/refs/heads/master',
title=None, notify=False,
post_amend_description=None, issue=None, cc=None,
custom_cl_base=None, tbr=None,
short_hostname='chromium',
labels=None):
if post_amend_description is None:
post_amend_description = description
cc = cc or []
# Determined in `_gerrit_base_calls`.
determined_ancestor_revision = custom_cl_base or 'fake_ancestor_sha'
calls = []
if squash_mode == 'default':
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],), ''),
((['git', 'config', '--bool', 'gerrit.squash-uploads'],), ''),
])
elif squash_mode in ('override_squash', 'override_nosquash'):
calls.extend([
((['git', 'config', '--bool', 'gerrit.override-squash-uploads'],),
'true' if squash_mode == 'override_squash' else 'false'),
])
else:
assert squash_mode in ('squash', 'nosquash')
# If issue is given, then description is fetched from Gerrit instead.
if issue is None:
calls += [
((['git', 'log', '--pretty=format:%s\n\n%b',
((custom_cl_base + '..') if custom_cl_base else
'fake_ancestor_sha..HEAD')],),
description),
]
if squash:
title = 'Initial_upload'
else:
if not title:
calls += [
((['git', 'show', '-s', '--format=%s', 'HEAD'],), ''),
(('ask_for_data', 'Title for patchset []: '), 'User input'),
]
title = 'User_input'
if not git_footers.get_footer_change_id(description) and not squash:
calls += [
(('DownloadGerritHook', False), ''),
# Amending of commit message to get the Change-Id.
((['git', 'log', '--pretty=format:%s\n\n%b',
determined_ancestor_revision + '..HEAD'],),
description),
((['git', 'commit', '--amend', '-m', description],), ''),
((['git', 'log', '--pretty=format:%s\n\n%b',
determined_ancestor_revision + '..HEAD'],),
post_amend_description)
]
if squash:
if not issue:
# Prompting to edit description on first upload.
calls += [
((['git', 'config', 'core.editor'],), ''),
((['RunEditor'],), description),
]
ref_to_push = 'abcdef0123456789'
calls += [
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
]
if custom_cl_base is None:
calls += [
((['get_or_create_merge_base', 'master',
'refs/remotes/origin/master'],),
'origin/master'),
]
parent = 'origin/master'
else:
calls += [
((['git', 'merge-base', '--is-ancestor', custom_cl_base,
'refs/remotes/origin/master'],),
          callError(1)),  # Means it is not an ancestor.
(('ask_for_data',
'Do you take responsibility for cleaning up potential mess '
'resulting from proceeding with upload? Press Enter to upload, '
'or Ctrl+C to abort'), ''),
]
parent = custom_cl_base
calls += [
((['git', 'rev-parse', 'HEAD:'],), # `HEAD:` means HEAD's tree hash.
'0123456789abcdef'),
((['git', 'commit-tree', '0123456789abcdef', '-p', parent,
'-F', '/tmp/named'],),
ref_to_push),
]
else:
ref_to_push = 'HEAD'
calls += [
(('SaveDescriptionBackup',), None),
((['git', 'rev-list',
(custom_cl_base if custom_cl_base else expected_upstream_ref) + '..' +
ref_to_push],),
'1hashPerLine\n'),
]
metrics_arguments = []
if notify:
ref_suffix = '%ready,notify=ALL'
metrics_arguments += ['ready', 'notify=ALL']
else:
if not issue and squash:
ref_suffix = '%wip'
metrics_arguments.append('wip')
else:
ref_suffix = '%notify=NONE'
metrics_arguments.append('notify=NONE')
if title:
ref_suffix += ',m=' + title
metrics_arguments.append('m')
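    # The accumulated ref_suffix becomes Gerrit push options, e.g.
    # 'refs/for/refs/heads/master%wip,m=Initial_upload,r=foo@example.com'.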
calls += [
((['git', 'config', 'rietveld.cc'],), ''),
]
if short_hostname == 'chromium':
      # All reviewers and CCs go into ref_suffix.
for r in sorted(reviewers):
ref_suffix += ',r=%s' % r
metrics_arguments.append('r')
for c in sorted(['[email protected]',
'[email protected]'] + cc):
ref_suffix += ',cc=%s' % c
metrics_arguments.append('cc')
reviewers, cc = [], []
else:
# TODO(crbug/877717): remove this case.
calls += [
(('ValidAccounts', '%s-review.googlesource.com' % short_hostname,
sorted(reviewers) + ['[email protected]',
'[email protected]'] + cc),
{
e: {'email': e}
for e in (reviewers + ['[email protected]'] + cc)
})
]
for r in sorted(reviewers):
if r != 'bad-account-or-email':
ref_suffix += ',r=%s' % r
metrics_arguments.append('r')
reviewers.remove(r)
for c in sorted(['[email protected]'] + cc):
ref_suffix += ',cc=%s' % c
metrics_arguments.append('cc')
if c in cc:
cc.remove(c)
for k, v in sorted((labels or {}).items()):
ref_suffix += ',l=%s+%d' % (k, v)
metrics_arguments.append('l=%s+%d' % (k, v))
if tbr:
calls += [
(('GetCodeReviewTbrScore',
'%s-review.googlesource.com' % short_hostname,
'my/repo'),
2,),
]
calls += [
(('time.time',), 1000,),
((['git', 'push',
'https://%s.googlesource.com/my/repo' % short_hostname,
ref_to_push + ':refs/for/refs/heads/master' + ref_suffix],),
(('remote:\n'
'remote: Processing changes: (\)\n'
'remote: Processing changes: (|)\n'
'remote: Processing changes: (/)\n'
'remote: Processing changes: (-)\n'
'remote: Processing changes: new: 1 (/)\n'
'remote: Processing changes: new: 1, done\n'
'remote:\n'
'remote: New Changes:\n'
'remote: https://%s-review.googlesource.com/#/c/my/repo/+/123456'
' XXX\n'
'remote:\n'
'To https://%s.googlesource.com/my/repo\n'
' * [new branch] hhhh -> refs/for/refs/heads/master\n'
) % (short_hostname, short_hostname)),),
(('time.time',), 2000,),
(('add_repeated',
'sub_commands',
{
'execution_time': 1000,
'command': 'git push',
'exit_code': 0,
'arguments': sorted(metrics_arguments),
}),
None,),
]
if squash:
calls += [
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash',
'abcdef0123456789'],), ''),
]
# TODO(crbug/877717): this should never be used.
if squash and short_hostname != 'chromium':
calls += [
(('AddReviewers',
'chromium-review.googlesource.com', 'my%2Frepo~123456',
sorted(reviewers),
cc + ['[email protected]'],
notify),
''),
]
calls += cls._git_post_upload_calls()
return calls
def _run_gerrit_upload_test(
self,
upload_args,
description,
reviewers=None,
squash=True,
squash_mode=None,
expected_upstream_ref='origin/refs/heads/master',
title=None,
notify=False,
post_amend_description=None,
issue=None,
cc=None,
fetched_status=None,
other_cl_owner=None,
custom_cl_base=None,
tbr=None,
short_hostname='chromium',
labels=None):
"""Generic gerrit upload test framework."""
if squash_mode is None:
if '--no-squash' in upload_args:
squash_mode = 'nosquash'
elif '--squash' in upload_args:
squash_mode = 'squash'
else:
squash_mode = 'default'
reviewers = reviewers or []
cc = cc or []
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(
same_auth=('git-owner.example.com', '', 'pass')))
self.mock(git_cl._GerritChangelistImpl, '_GerritCommitMsgHookCheck',
lambda _, offer_removal: None)
self.mock(git_cl.gclient_utils, 'RunEditor',
lambda *_, **__: self._mocked_call(['RunEditor']))
self.mock(git_cl, 'DownloadGerritHook', lambda force: self._mocked_call(
'DownloadGerritHook', force))
self.calls = self._gerrit_base_calls(
issue=issue,
fetched_description=description,
fetched_status=fetched_status,
other_cl_owner=other_cl_owner,
custom_cl_base=custom_cl_base,
short_hostname=short_hostname)
if fetched_status != 'ABANDONED':
self.mock(tempfile, 'NamedTemporaryFile', MakeNamedTemporaryFileMock(
expected_content=description))
self.mock(os, 'remove', lambda _: True)
self.calls += self._gerrit_upload_calls(
description, reviewers, squash,
squash_mode=squash_mode,
expected_upstream_ref=expected_upstream_ref,
title=title, notify=notify,
post_amend_description=post_amend_description,
issue=issue, cc=cc,
custom_cl_base=custom_cl_base, tbr=tbr,
short_hostname=short_hostname,
labels=labels)
# Uncomment when debugging.
# print '\n'.join(map(lambda x: '%2i: %s' % x, enumerate(self.calls)))
git_cl.main(['upload'] + upload_args)
def test_gerrit_upload_without_change_id(self):
self._run_gerrit_upload_test(
['--no-squash'],
'desc\n\nBUG=\n',
[],
squash=False,
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_upload_without_change_id_override_nosquash(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n',
[],
squash=False,
squash_mode='override_nosquash',
post_amend_description='desc\n\nBUG=\n\nChange-Id: Ixxx')
def test_gerrit_no_reviewer(self):
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n\nChange-Id: I123456789\n',
[],
squash=False,
squash_mode='override_nosquash')
def test_gerrit_no_reviewer_non_chromium_host(self):
# TODO(crbug/877717): remove this test case.
self._run_gerrit_upload_test(
[],
'desc\n\nBUG=\n\nChange-Id: I123456789\n',
[],
squash=False,
squash_mode='override_nosquash',
short_hostname='other')
def test_gerrit_patchset_title_special_chars(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self._run_gerrit_upload_test(
['-f', '-t', 'We\'ll escape ^_ ^ special chars...@{u}'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
squash=False,
squash_mode='override_nosquash',
title='We%27ll_escape_%5E%5F_%5E_special_chars%2E%2E%2E%40%7Bu%7D')
def test_gerrit_reviewers_cmd_line(self):
self._run_gerrit_upload_test(
['-r', '[email protected]', '--send-mail'],
'desc\n\nBUG=\n\nChange-Id: I123456789',
['[email protected]'],
squash=False,
squash_mode='override_nosquash',
notify=True)
def test_gerrit_reviewer_multiple(self):
self.mock(git_cl.gerrit_util, 'GetCodeReviewTbrScore',
lambda *a: self._mocked_call('GetCodeReviewTbrScore', *a))
self._run_gerrit_upload_test(
[],
'desc\[email protected]\nBUG=\[email protected]\n'
'[email protected],[email protected]\n\n'
'Change-Id: 123456789',
['[email protected]', '[email protected]'],
expected_upstream_ref='origin/master',
cc=['[email protected]', '[email protected]'],
tbr='[email protected]',
labels={'Code-Review': 2})
def test_gerrit_upload_squash_first_is_default(self):
self._run_gerrit_upload_test(
[],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_first(self):
self._run_gerrit_upload_test(
['--squash'],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master')
def test_gerrit_upload_squash_first_with_labels(self):
self._run_gerrit_upload_test(
['--squash', '--cq-dry-run', '--enable-auto-submit'],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master',
labels={'Commit-Queue': 1, 'Auto-Submit': 1})
def test_gerrit_upload_squash_first_against_rev(self):
custom_cl_base = 'custom_cl_base_rev_or_branch'
self._run_gerrit_upload_test(
['--squash', custom_cl_base],
'desc\nBUG=\n\nChange-Id: 123456789',
[],
squash=True,
expected_upstream_ref='origin/master',
custom_cl_base=custom_cl_base)
self.assertIn(
'If you proceed with upload, more than 1 CL may be created by Gerrit',
sys.stdout.getvalue())
def test_gerrit_upload_squash_reupload(self):
description = 'desc\nBUG=\n\nChange-Id: 123456789'
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456)
def test_gerrit_upload_squash_reupload_to_abandoned(self):
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call('DieWithError', msg))
description = 'desc\nBUG=\n\nChange-Id: 123456789'
with self.assertRaises(SystemExitMock):
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456,
fetched_status='ABANDONED')
def test_gerrit_upload_squash_reupload_to_not_owned(self):
self.mock(git_cl.gerrit_util, 'GetAccountDetails',
lambda *_, **__: {'email': '[email protected]'})
description = 'desc\nBUG=\n\nChange-Id: 123456789'
self._run_gerrit_upload_test(
['--squash'],
description,
[],
squash=True,
expected_upstream_ref='origin/master',
issue=123456,
other_cl_owner='[email protected]')
self.assertIn(
'WARNING: Change 123456 is owned by [email protected], but you '
'authenticate to Gerrit as [email protected].\n'
'Uploading may fail due to lack of permissions',
git_cl.sys.stdout.getvalue())
def test_upload_branch_deps(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
def mock_run_git(*args, **_kwargs):
if args[0] == ['for-each-ref',
'--format=%(refname:short) %(upstream:short)',
'refs/heads']:
# Create a local branch dependency tree that looks like this:
# test1 -> test2 -> test3 -> test4 -> test5
# -> test3.1
# test6 -> test0
branch_deps = [
'test2 test1', # test1 -> test2
'test3 test2', # test2 -> test3
'test3.1 test2', # test2 -> test3.1
'test4 test3', # test3 -> test4
'test5 test4', # test4 -> test5
'test6 test0', # test0 -> test6
'test7', # test7
]
return '\n'.join(branch_deps)
self.mock(git_cl, 'RunGit', mock_run_git)
class RecordCalls:
times_called = 0
record_calls = RecordCalls()
def mock_CMDupload(*args, **_kwargs):
record_calls.times_called += 1
return 0
self.mock(git_cl, 'CMDupload', mock_CMDupload)
self.calls = [
(('ask_for_data', 'This command will checkout all dependent branches '
'and run "git cl upload". Press Enter to continue, '
'or Ctrl+C to abort'), ''),
]
class MockChangelist():
def __init__(self):
pass
def GetBranch(self):
return 'test1'
def GetIssue(self):
return '123'
def GetPatchset(self):
return '1001'
def IsGerrit(self):
return False
ret = git_cl.upload_branch_deps(MockChangelist(), [])
# CMDupload should have been called 5 times because of 5 dependent branches.
self.assertEquals(5, record_calls.times_called)
self.assertEquals(0, ret)
def test_gerrit_change_id(self):
self.calls = [
((['git', 'write-tree'], ),
'hashtree'),
((['git', 'rev-parse', 'HEAD~0'], ),
'branch-parent'),
((['git', 'var', 'GIT_AUTHOR_IDENT'], ),
'A B <[email protected]> 1456848326 +0100'),
((['git', 'var', 'GIT_COMMITTER_IDENT'], ),
'C D <[email protected]> 1456858326 +0100'),
((['git', 'hash-object', '-t', 'commit', '--stdin'], ),
'hashchange'),
]
change_id = git_cl.GenerateGerritChangeId('line1\nline2\n')
self.assertEqual(change_id, 'Ihashchange')
def test_description_append_footer(self):
for init_desc, footer_line, expected_desc in [
# Use unique desc first lines for easy test failure identification.
('foo', 'R=one', 'foo\n\nR=one'),
('foo\n\nR=one', 'BUG=', 'foo\n\nR=one\nBUG='),
('foo\n\nR=one', 'Change-Id: Ixx', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nChange-Id: Ixx', 'R=one', 'foo\n\nR=one\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'TBR=two',
'foo\n\nR=one\nTBR=two\n\nChange-Id: Ixx'),
('foo\n\nR=one\n\nChange-Id: Ixx', 'Foo-Bar: baz',
'foo\n\nR=one\n\nChange-Id: Ixx\nFoo-Bar: baz'),
('foo\n\nChange-Id: Ixx', 'Foo-Bak: baz',
'foo\n\nChange-Id: Ixx\nFoo-Bak: baz'),
('foo', 'Change-Id: Ixx', 'foo\n\nChange-Id: Ixx'),
]:
desc = git_cl.ChangeDescription(init_desc)
desc.append_footer(footer_line)
self.assertEqual(desc.description, expected_desc)
def test_update_reviewers(self):
data = [
('foo', [], [],
'foo'),
('foo\nR=xx', [], [],
'foo\nR=xx'),
('foo\nTBR=xx', [], [],
'foo\nTBR=xx'),
('foo', ['a@c'], [],
'foo\n\nR=a@c'),
('foo\nR=xx', ['a@c'], [],
'foo\n\nR=a@c, xx'),
('foo\nTBR=xx', ['a@c'], [],
'foo\n\nR=a@c\nTBR=xx'),
('foo\nTBR=xx\nR=yy', ['a@c'], [],
'foo\n\nR=a@c, yy\nTBR=xx'),
('foo\nBUG=', ['a@c'], [],
'foo\nBUG=\nR=a@c'),
('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], [],
'foo\n\nR=a@c, bar, xx\nTBR=yy'),
('foo', ['a@c', 'b@c'], [],
'foo\n\nR=a@c, b@c'),
('foo\nBar\n\nR=\nBUG=', ['c@c'], [],
'foo\nBar\n\nR=c@c\nBUG='),
('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], [],
'foo\nBar\n\nR=c@c\nBUG='),
# Same as the line before, but full of whitespaces.
(
'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'], [],
'foo\nBar\n\nR=c@c\n BUG =',
),
# Whitespaces aren't interpreted as new lines.
('foo BUG=allo R=joe ', ['c@c'], [],
'foo BUG=allo R=joe\n\nR=c@c'),
# Redundant TBRs get promoted to Rs
('foo\n\nR=a@c\nTBR=t@c', ['b@c', 'a@c'], ['a@c', 't@c'],
'foo\n\nR=a@c, b@c\nTBR=t@c'),
]
expected = [i[-1] for i in data]
actual = []
for orig, reviewers, tbrs, _expected in data:
obj = git_cl.ChangeDescription(orig)
obj.update_reviewers(reviewers, tbrs)
actual.append(obj.description)
self.assertEqual(expected, actual)
def test_get_hash_tags(self):
cases = [
('', []),
('a', []),
('[a]', ['a']),
('[aa]', ['aa']),
('[a ]', ['a']),
('[a- ]', ['a']),
('[a- b]', ['a-b']),
('[a--b]', ['a-b']),
('[a', []),
('[a]x', ['a']),
('[aa]x', ['aa']),
('[a b]', ['a-b']),
('[a  b]', ['a-b']),
('[a__b]', ['a-b']),
('[a] x', ['a']),
('[a][b]', ['a', 'b']),
('[a] [b]', ['a', 'b']),
('[a][b]x', ['a', 'b']),
('[a][b] x', ['a', 'b']),
('[a]\n[b]', ['a']),
('[a\nb]', []),
('[a][', ['a']),
('Revert "[a] feature"', ['a']),
('Reland "[a] feature"', ['a']),
('Revert: [a] feature', ['a']),
('Reland: [a] feature', ['a']),
('Revert "Reland: [a] feature"', ['a']),
('Foo: feature', ['foo']),
('Foo Bar: feature', ['foo-bar']),
('Revert "Foo bar: feature"', ['foo-bar']),
('Reland "Foo bar: feature"', ['foo-bar']),
]
for desc, expected in cases:
change_desc = git_cl.ChangeDescription(desc)
actual = change_desc.get_hash_tags()
self.assertEqual(
actual,
expected,
'GetHashTags(%r) == %r, expected %r' % (desc, actual, expected))
def test_get_target_ref(self):
# Check remote or remote branch not present.
self.assertEqual(None, git_cl.GetTargetRef('origin', None, 'master'))
self.assertEqual(None, git_cl.GetTargetRef(None,
'refs/remotes/origin/master',
'master'))
# Check default target refs for branches.
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/master',
None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkgr',
None))
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin', 'refs/remotes/origin/lkcr',
None))
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
None))
self.assertEqual('refs/diff/test',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/refs/diff/test',
None))
self.assertEqual('refs/heads/chrome/m42',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/chrome/m42',
None))
# Check target refs for user-specified target branch.
for branch in ('branch-heads/123', 'remotes/branch-heads/123',
'refs/remotes/branch-heads/123'):
self.assertEqual('refs/branch-heads/123',
git_cl.GetTargetRef('origin',
'refs/remotes/origin/master',
branch))
for branch in ('origin/master', 'remotes/origin/master',
'refs/remotes/origin/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch))
for branch in ('master', 'heads/master', 'refs/heads/master'):
self.assertEqual('refs/heads/master',
git_cl.GetTargetRef('origin',
'refs/remotes/branch-heads/123',
branch))
def test_patch_when_dirty(self):
# Patch when local tree is dirty
self.mock(git_common, 'is_dirty_git_tree', lambda x: True)
self.assertNotEqual(git_cl.main(['patch', '123456']), 0)
@staticmethod
def _get_gerrit_codereview_server_calls(branch, value=None,
git_short_host='host',
detect_branch=True,
detect_server=True):
"""Returns calls executed by _GerritChangelistImpl.GetCodereviewServer.
If value is given, branch.<BRANCH>.gerritcodereview is already set.
"""
calls = []
if detect_branch:
calls.append(((['git', 'symbolic-ref', 'HEAD'],), branch))
if detect_server:
calls.append(((['git', 'config', 'branch.' + branch + '.gerritserver'],),
CERR1 if value is None else value))
if value is None:
calls += [
((['git', 'config', 'branch.' + branch + '.merge'],),
'refs/heads/' + branch),
((['git', 'config', 'branch.' + branch + '.remote'],),
'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://%s.googlesource.com/my/repo' % git_short_host),
]
return calls
def _patch_common(self, force_codereview=False,
new_branch=False, git_short_host='host',
detect_gerrit_server=False,
actual_codereview=None,
codereview_in_url=False):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl, 'IsGitVersionAtLeast', lambda *args: True)
if new_branch:
self.calls = [((['git', 'new-branch', 'master'],), ''),]
if codereview_in_url and actual_codereview == 'rietveld':
self.calls += [
((['git', 'rev-parse', '--show-cdup'],), ''),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
]
if not force_codereview and not codereview_in_url:
# These calls detect codereview to use.
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
]
if detect_gerrit_server:
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host=git_short_host,
detect_branch=not new_branch and force_codereview)
actual_codereview = 'gerrit'
if actual_codereview == 'gerrit':
self.calls += [
(('GetChangeDetail', git_short_host + '-review.googlesource.com',
'my%2Frepo~123456', ['ALL_REVISIONS', 'CURRENT_COMMIT']),
{
'current_revision': '7777777777',
'revisions': {
'1111111111': {
'_number': 1,
'fetch': {'http': {
'url': 'https://%s.googlesource.com/my/repo' % git_short_host,
'ref': 'refs/changes/56/123456/1',
}},
},
'7777777777': {
'_number': 7,
'fetch': {'http': {
'url': 'https://%s.googlesource.com/my/repo' % git_short_host,
'ref': 'refs/changes/56/123456/7',
}},
},
},
}),
]
def test_patch_gerrit_default(self):
self._patch_common(git_short_host='chromium', detect_gerrit_server=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '123456']), 0)
def test_patch_gerrit_new_branch(self):
|
def test_patch_gerrit_force(self):
self._patch_common(
force_codereview=True, git_short_host='host', detect_gerrit_server=True)
self.calls += [
((['git', 'fetch', 'https://host.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'reset', '--hard', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://host-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '--gerrit', '123456', '--force']), 0)
def test_patch_gerrit_guess_by_url(self):
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host='else', detect_server=False)
self._patch_common(
actual_codereview='gerrit', git_short_host='else',
codereview_in_url=True, detect_gerrit_server=False)
self.calls += [
((['git', 'fetch', 'https://else.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://else-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '1'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(
['patch', 'https://else-review.googlesource.com/#/c/123456/1']), 0)
def test_patch_gerrit_guess_by_url_with_repo(self):
self.calls += self._get_gerrit_codereview_server_calls(
'master', git_short_host='else', detect_server=False)
self._patch_common(
actual_codereview='gerrit', git_short_host='else',
codereview_in_url=True, detect_gerrit_server=False)
self.calls += [
((['git', 'fetch', 'https://else.googlesource.com/my/repo',
'refs/changes/56/123456/1'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://else-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '1'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(
['patch', 'https://else-review.googlesource.com/c/my/repo/+/123456/1']),
0)
def test_patch_gerrit_conflict(self):
self._patch_common(detect_gerrit_server=True, git_short_host='chromium')
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), CERR1),
((['DieWithError', 'Command "git cherry-pick FETCH_HEAD" failed.\n'],),
SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
git_cl.main(['patch', '123456'])
def test_patch_gerrit_not_exists(self):
def notExists(_issue, *_, **kwargs):
raise git_cl.gerrit_util.GerritError(404, '')
self.mock(git_cl.gerrit_util, 'GetChangeDetail', notExists)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.gerritissue'],), CERR1),
((['git', 'config', 'branch.master.gerritserver'],), CERR1),
((['git', 'config', 'branch.master.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
((['DieWithError',
'change 123456 at https://chromium-review.googlesource.com does not '
'exist or you have no access to it'],), SystemExitMock()),
]
with self.assertRaises(SystemExitMock):
self.assertEqual(1, git_cl.main(['patch', '123456']))
def _checkout_calls(self):
return [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ),
('branch.ger-branch.gerritissue 123456\n'
'branch.gbranch654.gerritissue 654321\n')),
]
def test_checkout_gerrit(self):
"""Tests git cl checkout <issue>."""
self.calls = self._checkout_calls()
self.calls += [((['git', 'checkout', 'ger-branch'], ), '')]
self.assertEqual(0, git_cl.main(['checkout', '123456']))
def test_checkout_not_found(self):
"""Tests git cl checkout <issue>."""
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = self._checkout_calls()
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def test_checkout_no_branch_issues(self):
"""Tests git cl checkout <issue>."""
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', '--local', '--get-regexp',
'branch\\..*\\.gerritissue'], ), CERR1),
]
self.assertEqual(1, git_cl.main(['checkout', '99999']))
def _test_gerrit_ensure_authenticated_common(self, auth,
skip_auth_check=False):
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMockFactory(hosts_with_creds=auth))
self.mock(git_cl, 'DieWithError',
lambda msg, change=None: self._mocked_call(['DieWithError', msg]))
self.calls = self._gerrit_ensure_auth_calls(skip_auth_check=skip_auth_check)
cl = git_cl.Changelist(codereview='gerrit')
cl.branch = 'master'
cl.branchref = 'refs/heads/master'
cl.lookedup_issue = True
return cl
def test_gerrit_ensure_authenticated_missing(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com': ('git-is.ok', '', 'but gerrit is missing'),
})
self.calls.append(
((['DieWithError',
'Credentials for the following hosts are required:\n'
' chromium-review.googlesource.com\n'
'These are read from ~/.gitcookies (or legacy ~/.netrc)\n'
'You can (re)generate your credentials by visiting '
'https://chromium-review.googlesource.com/new-password'],), ''),)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_conflict(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('git-one.example.com', None, 'secret1'),
'chromium-review.googlesource.com':
('git-other.example.com', None, 'secret2'),
})
self.calls.append(
(('ask_for_data', 'If you know what you are doing '
'press Enter to continue, or Ctrl+C to abort'), ''))
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_ok(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('git-same.example.com', None, 'secret'),
'chromium-review.googlesource.com':
('git-same.example.com', None, 'secret'),
})
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_skipped(self):
cl = self._test_gerrit_ensure_authenticated_common(
auth={}, skip_auth_check=True)
self.assertIsNone(cl.EnsureAuthenticated(force=False))
def test_gerrit_ensure_authenticated_bearer_token(self):
cl = self._test_gerrit_ensure_authenticated_common(auth={
'chromium.googlesource.com':
('', None, 'secret'),
'chromium-review.googlesource.com':
('', None, 'secret'),
})
self.assertIsNone(cl.EnsureAuthenticated(force=False))
header = gerrit_util.CookiesAuthenticator().get_auth_header(
'chromium.googlesource.com')
self.assertTrue('Bearer' in header)
def _cmd_set_commit_gerrit_common(self, vote, notify=None):
self.mock(git_cl.gerrit_util, 'SetReview',
lambda h, i, labels, notify=None:
self._mocked_call(['SetReview', h, i, labels, notify]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'refs/heads/master'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra.git'),
((['SetReview', 'chromium-review.googlesource.com',
'infra%2Finfra~123',
{'Commit-Queue': vote}, notify],), ''),
]
def test_cmd_set_commit_gerrit_clear(self):
self._cmd_set_commit_gerrit_common(0)
self.assertEqual(0, git_cl.main(['set-commit', '-c']))
def test_cmd_set_commit_gerrit_dry(self):
self._cmd_set_commit_gerrit_common(1, notify=False)
self.assertEqual(0, git_cl.main(['set-commit', '-d']))
def test_cmd_set_commit_gerrit(self):
self._cmd_set_commit_gerrit_common(2)
self.assertEqual(0, git_cl.main(['set-commit']))
def test_description_display(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
ChangelistMock.desc = 'foo\n'
self.assertEqual(0, git_cl.main(['description', '-d']))
self.assertEqual('foo\n', out.getvalue())
def test_StatusFieldOverrideIssueMissingArgs(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--issue must be specified')
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stderr', out)
try:
self.assertEqual(git_cl.main(['status', '--issue', '1', '--gerrit']), 0)
except SystemExit as ex:
self.assertEqual(ex.code, 2)
self.assertRegexpMatches(out.getvalue(), r'--field must be specified')
def test_StatusFieldOverrideIssue(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.assertEqual(
git_cl.main(['status', '--issue', '1', '--gerrit', '--field', 'desc']),
0)
self.assertEqual(out.getvalue(), 'foobar\n')
def test_SetCloseOverrideIssue(self):
def assertIssue(cl_self, *_args):
self.assertEquals(cl_self.issue, 1)
return 'foobar'
self.mock(git_cl.Changelist, 'GetDescription', assertIssue)
self.mock(git_cl.Changelist, 'CloseIssue', lambda *_: None)
self.assertEqual(
git_cl.main(['set-close', '--issue', '1', '--gerrit']), 0)
def test_description(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/my/repo'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'my%2Frepo~123123', ['CURRENT_REVISION', 'CURRENT_COMMIT']),
{
'current_revision': 'sha1',
'revisions': {'sha1': {
'commit': {'message': 'foobar'},
}},
}),
]
self.assertEqual(0, git_cl.main([
'description',
'https://chromium-review.googlesource.com/c/my/repo/+/123123',
'-d']))
self.assertEqual('foobar\n', out.getvalue())
def test_description_set_raw(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hihi'))
self.assertEqual(0, git_cl.main(['description', '-n', 'hihi']))
self.assertEqual('hihi', ChangelistMock.desc)
def test_description_appends_bug_line(self):
current_desc = 'Some.\n\nChange-Id: xxx'
def RunEditor(desc, _, **kwargs):
self.assertEquals(
'# Enter a description of the change.\n'
'# This will be displayed on the codereview site.\n'
'# The first line will also be used as the subject of the review.\n'
'#--------------------This line is 72 characters long'
'--------------------\n'
'Some.\n\nChange-Id: xxx\nBug: ',
desc)
# Simulate user changing something.
return 'Some.\n\nChange-Id: xxx\nBug: 123'
def UpdateDescriptionRemote(_, desc, force=False):
self.assertEquals(desc, 'Some.\n\nChange-Id: xxx\nBug: 123')
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.Changelist, 'GetDescription',
lambda *args: current_desc)
self.mock(git_cl._GerritChangelistImpl, 'UpdateDescriptionRemote',
UpdateDescriptionRemote)
self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'rietveld.autoupdate'],), CERR1),
((['git', 'config', 'rietveld.bug-prefix'],), CERR1),
((['git', 'config', 'core.editor'],), 'vi'),
]
self.assertEqual(0, git_cl.main(['description', '--gerrit']))
def test_description_set_stdin(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl, 'Changelist', ChangelistMock)
self.mock(git_cl.sys, 'stdin', StringIO.StringIO('hi \r\n\t there\n\nman'))
self.assertEqual(0, git_cl.main(['description', '-n', '-']))
self.assertEqual('hi\n\t there\n\nman', ChangelistMock.desc)
def test_archive(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '456'),
((['git', 'config', 'branch.foo.gerritissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'tag', 'git-cl-archived-456-foo', 'foo'],), ''),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f']))
def test_archive_current_branch_fails(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master'),
((['git', 'config', 'branch.master.gerritissue'],), '1'),
((['git', 'symbolic-ref', 'HEAD'],), 'master')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'closed')])
self.assertEqual(1, git_cl.main(['archive', '-f']))
def test_archive_dry_run(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '456'),
((['git', 'config', 'branch.foo.gerritissue'],), CERR1),
((['git', 'config', 'branch.bar.gerritissue'],), '789'),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--dry-run']))
def test_archive_no_tags(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.calls = \
[((['git', 'for-each-ref', '--format=%(refname)', 'refs/heads'],),
'refs/heads/master\nrefs/heads/foo\nrefs/heads/bar'),
((['git', 'config', 'branch.master.gerritissue'],), '1'),
((['git', 'config', 'branch.foo.gerritissue'],), '456'),
((['git', 'config', 'branch.bar.gerritissue'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'branch', '-D', 'foo'],), '')]
self.mock(git_cl, 'get_cl_statuses',
lambda branches, fine_grained, max_processes:
[(MockChangelistWithBranchAndIssue('master', 1), 'open'),
(MockChangelistWithBranchAndIssue('foo', 456), 'closed'),
(MockChangelistWithBranchAndIssue('bar', 789), 'open')])
self.assertEqual(0, git_cl.main(['archive', '-f', '--notags']))
def test_cmd_issue_erase_existing(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
# Let this command raise exception (retcode=1) - it should be ignored.
((['git', 'config', '--unset', 'branch.feature.last-upload-hash'],),
CERR1),
((['git', 'config', '--unset', 'branch.feature.gerritissue'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritpatchset'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritserver'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritsquashhash'],),
''),
((['git', 'log', '-1', '--format=%B'],), 'This is a description'),
]
self.assertEqual(0, git_cl.main(['issue', '0']))
def test_cmd_issue_erase_existing_with_change_id(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetDescription',
lambda _: 'This is a description\n\nChange-Id: Ideadbeef')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
# Let this command raise exception (retcode=1) - it should be ignored.
((['git', 'config', '--unset', 'branch.feature.last-upload-hash'],),
CERR1),
((['git', 'config', '--unset', 'branch.feature.gerritissue'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritpatchset'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritserver'],), ''),
((['git', 'config', '--unset', 'branch.feature.gerritsquashhash'],),
''),
((['git', 'log', '-1', '--format=%B'],),
'This is a description\n\nChange-Id: Ideadbeef'),
((['git', 'commit', '--amend', '-m', 'This is a description\n'],), ''),
]
self.assertEqual(0, git_cl.main(['issue', '0']))
def test_cmd_issue_json(self):
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
(('write_json', 'output.json',
{'issue': 123,
'issue_url': 'https://chromium-review.googlesource.com/123'}),
''),
]
self.assertEqual(0, git_cl.main(['issue', '--json', 'output.json']))
def test_git_cl_try_default_cq_dry_run_gerrit(self):
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._GerritChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': '[email protected]'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['get_or_create_merge_base', 'feature', 'feature'],),
'fake_ancestor_sha'),
((['GetChange', 'fake_ancestor_sha', None], ),
git_cl.presubmit_support.GitChange(
'', '', '', '', '', '', '', '')),
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['DoGetTryMasters'], ), None),
((['SetCQState', git_cl._CQState.DRY_RUN], ), None),
]
out = StringIO.StringIO()
self.mock(git_cl.sys, 'stdout', out)
self.assertEqual(0, git_cl.main(['try']))
self.assertEqual(
out.getvalue(),
'Scheduling CQ dry run on: '
'https://chromium-review.googlesource.com/123456\n')
def test_git_cl_try_buildbucket_with_properties_gerrit(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 7)
self.mock(git_cl.uuid, 'uuid4', lambda: 'uuid4')
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': '[email protected]'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
]
def _buildbucket_retry(*_, **kw):
# self.maxDiff = 10000
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 1)
build = body['builds'][0]
params = json.loads(build.pop('parameters_json'))
self.assertEqual(params, {
u'builder_name': u'win',
u'changes': [{u'author': {u'email': u'[email protected]'},
u'revision': None}],
u'properties': {
u'category': u'git_cl_try',
u'key': u'val',
u'json': [{u'a': 1}, None],
u'patch_gerrit_url':
u'https://chromium-review.googlesource.com',
u'patch_issue': 123456,
u'patch_project': u'depot_tools',
u'patch_ref': u'refs/changes/56/123456/7',
u'patch_repository_url':
u'https://chromium.googlesource.com/depot_tools',
u'patch_set': 7,
u'patch_storage': u'gerrit',
}
})
self.assertEqual(build, {
u'bucket': u'luci.chromium.try',
u'client_operation_id': u'uuid4',
u'tags': [
u'builder:win',
u'buildset:patch/gerrit/chromium-review.googlesource.com/123456/7',
u'user_agent:git_cl_try',
],
})
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(0, git_cl.main([
'try', '-B', 'luci.chromium.try', '-b', 'win',
'-p', 'key=val', '-p', 'json=[{"a":1}, null]']))
self.assertRegexpMatches(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\nBucket: luci.chromium.try')
def test_git_cl_try_bots_on_multiple_masters(self):
self.mock(git_cl.Changelist, 'GetMostRecentPatchset', lambda _: 7)
self.mock(git_cl.Changelist, 'GetChange',
lambda _, *a: (
self._mocked_call(['GetChange']+list(a))))
self.mock(git_cl.presubmit_support, 'DoGetTryMasters',
lambda *_, **__: (
self._mocked_call(['DoGetTryMasters'])))
self.mock(git_cl._GerritChangelistImpl, 'SetCQState',
lambda _, s: self._mocked_call(['SetCQState', s]))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '123456'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://chromium-review.googlesource.com'),
((['git', 'config', 'branch.feature.merge'],), 'feature'),
((['git', 'config', 'branch.feature.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/depot_tools'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'depot_tools~123456',
['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'CURRENT_COMMIT']), {
'project': 'depot_tools',
'status': 'OPEN',
'owner': {'email': '[email protected]'},
'revisions': {
'deadbeaf': {
'_number': 6,
},
'beeeeeef': {
'_number': 7,
'fetch': {'http': {
'url': 'https://chromium.googlesource.com/depot_tools',
'ref': 'refs/changes/56/123456/7'
}},
},
},
}),
]
def _buildbucket_retry(*_, **kw):
body = json.loads(kw['body'])
self.assertEqual(len(body['builds']), 2)
self.assertEqual(body['builds'][0]['bucket'], 'bucket1')
params = json.loads(body['builds'][0]['parameters_json'])
self.assertEqual(params['builder_name'], 'builder1')
self.assertEqual(body['builds'][1]['bucket'], 'bucket2')
params = json.loads(body['builds'][1]['parameters_json'])
self.assertEqual(params['builder_name'], 'builder2')
self.mock(git_cl, '_buildbucket_retry', _buildbucket_retry)
self.mock(git_cl.urllib2, 'urlopen', lambda _: StringIO.StringIO(
json.dumps({
'builder1': {'bucket': 'bucket1'},
'builder2': {'bucket': 'bucket2'},
})))
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.assertEqual(
0, git_cl.main(['try', '-b', 'builder1', '-b', 'builder2']))
self.assertEqual(
git_cl.sys.stdout.getvalue(),
'Tried jobs on:\n'
'Bucket: bucket1\n'
' builder1: []\n'
'Bucket: bucket2\n'
' builder2: []\n'
'To see results here, run: git cl try-results\n'
'To see results in browser, run: git cl web\n')
def _common_GerritCommitMsgHookCheck(self):
self.mock(git_cl.sys, 'stdout', StringIO.StringIO())
self.mock(git_cl.os.path, 'abspath',
lambda path: self._mocked_call(['abspath', path]))
self.mock(git_cl.os.path, 'exists',
lambda path: self._mocked_call(['exists', path]))
self.mock(git_cl.gclient_utils, 'FileRead',
lambda path: self._mocked_call(['FileRead', path]))
self.mock(git_cl.gclient_utils, 'rm_file_or_tree',
lambda path: self._mocked_call(['rm_file_or_tree', path]))
self.calls = [
((['git', 'rev-parse', '--show-cdup'],), '../'),
((['abspath', '../'],), '/abs/git_repo_root'),
]
return git_cl.Changelist(codereview='gerrit', issue=123)
def test_GerritCommitMsgHookCheck_custom_hook(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'#!/bin/sh\necho "custom hook"')
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck_not_exists(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), False),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCommitMsgHookCheck(self):
cl = self._common_GerritCommitMsgHookCheck()
self.calls += [
((['exists', '/abs/git_repo_root/.git/hooks/commit-msg'],), True),
((['FileRead', '/abs/git_repo_root/.git/hooks/commit-msg'],),
'...\n# From Gerrit Code Review\n...\nadd_ChangeId()\n'),
(('ask_for_data', 'Do you want to remove it now? [Yes/No]: '), 'Yes'),
((['rm_file_or_tree', '/abs/git_repo_root/.git/hooks/commit-msg'],),
''),
]
cl._codereview_impl._GerritCommitMsgHookCheck(offer_removal=True)
def test_GerritCmdLand(self):
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritsquashhash'],),
'deadbeaf'),
((['git', 'diff', 'deadbeaf'],), ''), # No diff.
((['git', 'config', 'branch.feature.gerritserver'],),
'chromium-review.googlesource.com'),
]
cl = git_cl.Changelist(issue=123, codereview='gerrit')
cl._codereview_impl._GetChangeDetail = lambda _: {
'labels': {},
'current_revision': 'deadbeaf',
}
cl._codereview_impl._GetChangeCommit = lambda: {
'commit': 'deadbeef',
'web_links': [{'name': 'gitiles',
'url': 'https://git.googlesource.com/test/+/deadbeef'}],
}
cl._codereview_impl.SubmitIssue = lambda wait_for_merge: None
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.assertEqual(0, cl.CMDLand(force=True,
bypass_hooks=True,
verbose=True,
parallel=False))
self.assertRegexpMatches(out.getvalue(), 'Issue.*123 has been submitted')
self.assertRegexpMatches(out.getvalue(), 'Landed as: .*deadbeef')
BUILDBUCKET_BUILDS_MAP = {
'9000': {
'id': '9000',
'bucket': 'master.x.y',
'created_by': 'user:[email protected]',
'created_ts': '147200002222000',
'experimental': False,
'parameters_json': json.dumps({
'builder_name': 'my-bot',
'properties': {'category': 'cq'},
}),
'status': 'STARTED',
'tags': [
'build_address:x.y/my-bot/2',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/2',
},
'8000': {
'id': '8000',
'bucket': 'master.x.y',
'created_by': 'user:[email protected]',
'created_ts': '147200001111000',
'experimental': False,
'failure_reason': 'BUILD_FAILURE',
'parameters_json': json.dumps({
'builder_name': 'my-bot',
'properties': {'category': 'cq'},
}),
'result_details_json': json.dumps({
'properties': {'buildnumber': 1},
}),
'result': 'FAILURE',
'status': 'COMPLETED',
'tags': [
'build_address:x.y/my-bot/1',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/1',
},
}
def test_write_try_results_json(self):
expected_output = [
{
'bucket': 'master.x.y',
'buildbucket_id': '8000',
'builder_name': 'my-bot',
'created_ts': '147200001111000',
'experimental': False,
'failure_reason': 'BUILD_FAILURE',
'result': 'FAILURE',
'status': 'COMPLETED',
'tags': [
'build_address:x.y/my-bot/1',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/1',
},
{
'bucket': 'master.x.y',
'buildbucket_id': '9000',
'builder_name': 'my-bot',
'created_ts': '147200002222000',
'experimental': False,
'failure_reason': None,
'result': None,
'status': 'STARTED',
'tags': [
'build_address:x.y/my-bot/2',
'builder:my-bot',
'experimental:false',
'user_agent:cq',
],
'url': 'http://build.cr.org/p/x.y/builders/my-bot/builds/2',
},
]
self.calls = [(('write_json', 'output.json', expected_output), '')]
git_cl.write_try_results_json('output.json', self.BUILDBUCKET_BUILDS_MAP)
def _setup_fetch_try_jobs(self, most_recent_patchset=20001):
out = StringIO.StringIO()
self.mock(sys, 'stdout', out)
self.mock(git_cl.Changelist, 'GetMostRecentPatchset',
lambda *args: most_recent_patchset)
self.mock(git_cl.auth, 'get_authenticator_for_host', lambda host, _cfg:
self._mocked_call(['get_authenticator_for_host', host]))
self.mock(git_cl, '_buildbucket_retry', lambda *_, **__:
self._mocked_call(['_buildbucket_retry']))
def _setup_fetch_try_jobs_gerrit(self, *request_results):
self._setup_fetch_try_jobs(most_recent_patchset=13)
self.calls += [
((['git', 'symbolic-ref', 'HEAD'],), 'feature'),
((['git', 'config', 'branch.feature.gerritissue'],), '1'),
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# Simulate that Gerrit has more patchsets than local.
# ((['git', 'config', 'branch.feature.gerritpatchset'],), '12'),
((['git', 'config', 'branch.feature.gerritserver'],),
'https://x-review.googlesource.com'),
((['get_authenticator_for_host', 'x-review.googlesource.com'],),
AuthenticatorMock()),
] + [((['_buildbucket_retry'],), r) for r in request_results]
def test_fetch_try_jobs_none_gerrit(self):
self._setup_fetch_try_jobs_gerrit({})
self.assertEqual(0, git_cl.main(['try-results']))
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# self.assertRegexpMatches(
# sys.stdout.getvalue(),
# r'Warning: Codereview server has newer patchsets \(13\)')
self.assertRegexpMatches(sys.stdout.getvalue(), 'No try jobs')
def test_fetch_try_jobs_some_gerrit(self):
self._setup_fetch_try_jobs_gerrit({
'builds': self.BUILDBUCKET_BUILDS_MAP.values(),
})
# TODO(tandrii): Uncomment the below if we decide to support checking
# patchsets for Gerrit.
# self.calls.remove(
# ((['git', 'config', 'branch.feature.gerritpatchset'],), '12'))
self.assertEqual(0, git_cl.main(['try-results', '--patchset', '5']))
# ... and doesn't result in warning.
self.assertNotRegexpMatches(sys.stdout.getvalue(), 'Warning')
self.assertRegexpMatches(sys.stdout.getvalue(), '^Failures:')
self.assertRegexpMatches(sys.stdout.getvalue(), 'Started:')
self.assertRegexpMatches(sys.stdout.getvalue(), '2 try jobs')
def _mock_gerrit_changes_for_detail_cache(self):
self.mock(git_cl._GerritChangelistImpl, '_GetGerritHost', lambda _: 'host')
def test_gerrit_change_detail_cache_simple(self):
self._mock_gerrit_changes_for_detail_cache()
self.calls = [
(('GetChangeDetail', 'host', 'my%2Frepo~1', []), 'a'),
(('GetChangeDetail', 'host', 'ab%2Frepo~2', []), 'b'),
(('GetChangeDetail', 'host', 'ab%2Frepo~2', []), 'b2'),
]
cl1 = git_cl.Changelist(issue=1, codereview='gerrit')
cl1._cached_remote_url = (
True, 'https://chromium.googlesource.com/a/my/repo.git/')
cl2 = git_cl.Changelist(issue=2, codereview='gerrit')
cl2._cached_remote_url = (
True, 'https://chromium.googlesource.com/ab/repo')
self.assertEqual(cl1._GetChangeDetail(), 'a') # Miss.
self.assertEqual(cl1._GetChangeDetail(), 'a')
self.assertEqual(cl2._GetChangeDetail(), 'b') # Miss.
self.assertEqual(cl2._GetChangeDetail(no_cache=True), 'b2') # Miss.
self.assertEqual(cl1._GetChangeDetail(), 'a')
self.assertEqual(cl2._GetChangeDetail(), 'b2')
def test_gerrit_change_detail_cache_options(self):
self._mock_gerrit_changes_for_detail_cache()
self.calls = [
(('GetChangeDetail', 'host', 'repo~1', ['C', 'A', 'B']), 'cab'),
(('GetChangeDetail', 'host', 'repo~1', ['A', 'D']), 'ad'),
(('GetChangeDetail', 'host', 'repo~1', ['A']), 'a'), # no_cache=True
# no longer in cache.
(('GetChangeDetail', 'host', 'repo~1', ['B']), 'b'),
]
cl = git_cl.Changelist(issue=1, codereview='gerrit')
cl._cached_remote_url = (True, 'https://chromium.googlesource.com/repo/')
self.assertEqual(cl._GetChangeDetail(options=['C', 'A', 'B']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A', 'B', 'C']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['B', 'A']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['C']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A']), 'cab')
self.assertEqual(cl._GetChangeDetail(), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['A', 'D']), 'ad')
self.assertEqual(cl._GetChangeDetail(options=['A']), 'cab')
self.assertEqual(cl._GetChangeDetail(options=['D']), 'ad')
self.assertEqual(cl._GetChangeDetail(), 'cab')
# Finally, no_cache should invalidate all caches for given change.
self.assertEqual(cl._GetChangeDetail(options=['A'], no_cache=True), 'a')
self.assertEqual(cl._GetChangeDetail(options=['B']), 'b')
def test_gerrit_description_caching(self):
def gen_detail(rev, desc):
return {
'current_revision': rev,
'revisions': {rev: {'commit': {'message': desc}}}
}
self.calls = [
(('GetChangeDetail', 'host', 'my%2Frepo~1',
['CURRENT_REVISION', 'CURRENT_COMMIT']),
gen_detail('rev1', 'desc1')),
(('GetChangeDetail', 'host', 'my%2Frepo~1',
['CURRENT_REVISION', 'CURRENT_COMMIT']),
gen_detail('rev2', 'desc2')),
]
self._mock_gerrit_changes_for_detail_cache()
cl = git_cl.Changelist(issue=1, codereview='gerrit')
cl._cached_remote_url = (
True, 'https://chromium.googlesource.com/a/my/repo.git/')
self.assertEqual(cl.GetDescription(), 'desc1')
self.assertEqual(cl.GetDescription(), 'desc1') # cache hit.
self.assertEqual(cl.GetDescription(force=True), 'desc2')
def test_print_current_creds(self):
class CookiesAuthenticatorMock(object):
def __init__(self):
self.gitcookies = {
'host.googlesource.com': ('user', 'pass'),
'host-review.googlesource.com': ('user', 'pass'),
}
self.netrc = self
self.netrc.hosts = {
'github.com': ('user2', None, 'pass2'),
'host2.googlesource.com': ('user3', None, 'pass'),
}
self.mock(git_cl.gerrit_util, 'CookiesAuthenticator',
CookiesAuthenticatorMock)
self.mock(sys, 'stdout', StringIO.StringIO())
git_cl._GitCookiesChecker().print_current_creds(include_netrc=True)
self.assertEqual(list(sys.stdout.getvalue().splitlines()), [
' Host\t User\t Which file',
'============================\t=====\t===========',
'host-review.googlesource.com\t user\t.gitcookies',
' host.googlesource.com\t user\t.gitcookies',
' host2.googlesource.com\tuser3\t .netrc',
])
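# Reset the mocked StringIO so the second print_current_creds() run is
# captured in isolation (Python 2 StringIO moves written data into .buf
# once getvalue() has been called, so clearing .buf empties it).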
sys.stdout.buf = ''
git_cl._GitCookiesChecker().print_current_creds(include_netrc=False)
self.assertEqual(list(sys.stdout.getvalue().splitlines()), [
' Host\tUser\t Which file',
'============================\t====\t===========',
'host-review.googlesource.com\tuser\t.gitcookies',
' host.googlesource.com\tuser\t.gitcookies',
])
def _common_creds_check_mocks(self):
def exists_mock(path):
dirname = os.path.dirname(path)
if dirname == os.path.expanduser('~'):
dirname = '~'
base = os.path.basename(path)
if base in ('.netrc', '.gitcookies'):
return self._mocked_call('os.path.exists', '%s/%s' % (dirname, base))
# git cl also checks for the existence of other files not relevant to this
# test.
return None
self.mock(os.path, 'exists', exists_mock)
self.mock(sys, 'stdout', StringIO.StringIO())
def test_creds_check_gitcookies_not_configured(self):
self._common_creds_check_mocks()
self.mock(git_cl._GitCookiesChecker, 'get_hosts_with_creds',
lambda _, include_netrc=False: [])
self.calls = [
((['git', 'config', '--path', 'http.cookiefile'],), CERR1),
((['git', 'config', '--global', 'http.cookiefile'],), CERR1),
(('os.path.exists', '~/.netrc'), True),
(('ask_for_data', 'Press Enter to setup .gitcookies, '
'or Ctrl+C to abort'), ''),
((['git', 'config', '--global', 'http.cookiefile',
os.path.expanduser('~/.gitcookies')], ), ''),
]
self.assertEqual(0, git_cl.main(['creds-check']))
self.assertRegexpMatches(
sys.stdout.getvalue(),
'^You seem to be using outdated .netrc for git credentials:')
self.assertRegexpMatches(
sys.stdout.getvalue(),
'\nConfigured git to use .gitcookies from')
def test_creds_check_gitcookies_configured_custom_broken(self):
self._common_creds_check_mocks()
self.mock(git_cl._GitCookiesChecker, 'get_hosts_with_creds',
lambda _, include_netrc=False: [])
self.calls = [
((['git', 'config', '--path', 'http.cookiefile'],), CERR1),
((['git', 'config', '--global', 'http.cookiefile'],),
'/custom/.gitcookies'),
(('os.path.exists', '/custom/.gitcookies'), False),
(('ask_for_data', 'Reconfigure git to use default .gitcookies? '
'Press Enter to reconfigure, or Ctrl+C to abort'), ''),
((['git', 'config', '--global', 'http.cookiefile',
os.path.expanduser('~/.gitcookies')], ), ''),
]
self.assertEqual(0, git_cl.main(['creds-check']))
self.assertRegexpMatches(
sys.stdout.getvalue(),
'WARNING: You have configured custom path to .gitcookies: ')
self.assertRegexpMatches(
sys.stdout.getvalue(),
'However, your configured .gitcookies file is missing.')
def test_git_cl_comment_add_gerrit(self):
self.mock(git_cl.gerrit_util, 'SetReview',
lambda host, change, msg, ready:
self._mocked_call('SetReview', host, change, msg, ready))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), CERR1),
((['git', 'symbolic-ref', 'HEAD'],), CERR1),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('SetReview', 'chromium-review.googlesource.com', 'infra%2Finfra~10',
'msg', None),
None),
]
self.assertEqual(0, git_cl.main(['comment', '--gerrit', '-i', '10',
'-a', 'msg']))
def test_git_cl_comments_fetch_gerrit(self):
self.mock(sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', 'branch.foo.gerritserver'],), ''),
((['git', 'config', 'branch.foo.merge'],), ''),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'infra%2Finfra~1',
['MESSAGES', 'DETAILED_ACCOUNTS', 'CURRENT_REVISION',
'CURRENT_COMMIT']), {
'owner': {'email': '[email protected]'},
'current_revision': 'ba5eba11',
'revisions': {
'deadbeaf': {
'_number': 1,
},
'ba5eba11': {
'_number': 2,
},
},
'messages': [
{
u'_revision_number': 1,
u'author': {
u'_account_id': 1111084,
u'email': u'[email protected]',
u'name': u'Commit Bot'
},
u'date': u'2017-03-15 20:08:45.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046dc50b',
u'message': u'Patch Set 1:\n\nDry run: CQ is trying the patch...',
u'tag': u'autogenerated:cq:dry-run'
},
{
u'_revision_number': 2,
u'author': {
u'_account_id': 11151243,
u'email': u'[email protected]',
u'name': u'owner'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'PTAL',
},
{
u'_revision_number': 2,
u'author': {
u'_account_id': 148512,
u'email': u'[email protected]',
u'name': u'reviewer'
},
u'date': u'2017-03-17 05:19:37.500000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d4568',
u'message': u'Patch Set 2: Code-Review+1',
},
]
}),
(('GetChangeComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {
'/COMMIT_MSG': [
{
'author': {'email': u'[email protected]'},
'updated': u'2017-03-17 05:19:37.500000000',
'patch_set': 2,
'side': 'REVISION',
'message': 'Please include a bug link',
},
],
'codereview.settings': [
{
'author': {'email': u'[email protected]'},
'updated': u'2017-03-16 20:00:41.000000000',
'patch_set': 2,
'side': 'PARENT',
'line': 42,
'message': 'I removed this because it is bad',
},
]
}),
(('GetChangeRobotComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {}),
((['git', 'config', 'branch.foo.gerritpatchset', '2'],), ''),
] * 2 + [
(('write_json', 'output.json', [
{
u'date': u'2017-03-16 20:00:41.000000',
u'message': (
u'PTAL\n' +
u'\n' +
u'codereview.settings\n' +
u' Base, Line 42: https://chromium-review.googlesource.com/' +
u'c/1/2/codereview.settings#b42\n' +
u' I removed this because it is bad\n'),
u'autogenerated': False,
u'approval': False,
u'disapproval': False,
u'sender': u'[email protected]'
}, {
u'date': u'2017-03-17 05:19:37.500000',
u'message': (
u'Patch Set 2: Code-Review+1\n' +
u'\n' +
u'/COMMIT_MSG\n' +
u' PS2, File comment: https://chromium-review.googlesource' +
u'.com/c/1/2//COMMIT_MSG#\n' +
u' Please include a bug link\n'),
u'autogenerated': False,
u'approval': False,
u'disapproval': False,
u'sender': u'[email protected]'
}
]),'')
]
expected_comments_summary = [
git_cl._CommentSummary(
message=(
u'PTAL\n' +
u'\n' +
u'codereview.settings\n' +
u' Base, Line 42: https://chromium-review.googlesource.com/' +
u'c/1/2/codereview.settings#b42\n' +
u' I removed this because it is bad\n'),
date=datetime.datetime(2017, 3, 16, 20, 0, 41, 0),
autogenerated=False,
disapproval=False, approval=False, sender=u'[email protected]'),
git_cl._CommentSummary(
message=(
u'Patch Set 2: Code-Review+1\n' +
u'\n' +
u'/COMMIT_MSG\n' +
u' PS2, File comment: https://chromium-review.googlesource.com/' +
u'c/1/2//COMMIT_MSG#\n' +
u' Please include a bug link\n'),
date=datetime.datetime(2017, 3, 17, 5, 19, 37, 500000),
autogenerated=False,
disapproval=False, approval=False, sender=u'[email protected]'),
]
cl = git_cl.Changelist(
codereview='gerrit', issue=1, branchref='refs/heads/foo')
self.assertEqual(cl.GetCommentsSummary(), expected_comments_summary)
self.mock(git_cl.Changelist, 'GetBranch', lambda _: 'foo')
self.assertEqual(
0, git_cl.main(['comments', '-i', '1', '-j', 'output.json']))
def test_git_cl_comments_robot_comments(self):
# git cl comments also fetches robot comments (which are considered a type
# of autogenerated comment), and unlike other types of comments, only robot
# comments from the latest patchset are shown.
self.mock(sys, 'stdout', StringIO.StringIO())
self.calls = [
((['git', 'config', 'branch.foo.gerritserver'],), ''),
((['git', 'config', 'branch.foo.merge'],), ''),
((['git', 'config', 'rietveld.upstream-branch'],), CERR1),
((['git', 'branch', '-r'],), 'origin/HEAD -> origin/master\n'
'origin/master'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/infra/infra'),
(('GetChangeDetail', 'chromium-review.googlesource.com',
'infra%2Finfra~1',
['MESSAGES', 'DETAILED_ACCOUNTS', 'CURRENT_REVISION',
'CURRENT_COMMIT']), {
'owner': {'email': '[email protected]'},
'current_revision': 'ba5eba11',
'revisions': {
'deadbeaf': {
'_number': 1,
},
'ba5eba11': {
'_number': 2,
},
},
'messages': [
{
u'_revision_number': 1,
u'author': {
u'_account_id': 1111084,
u'email': u'[email protected]',
u'name': u'Commit Bot'
},
u'date': u'2017-03-15 20:08:45.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046dc50b',
u'message': u'Patch Set 1:\n\nDry run: CQ is trying the patch...',
u'tag': u'autogenerated:cq:dry-run'
},
{
u'_revision_number': 1,
u'author': {
u'_account_id': 123,
u'email': u'[email protected]',
u'name': u'Tricium'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'(1 comment)',
u'tag': u'autogenerated:tricium',
},
{
u'_revision_number': 1,
u'author': {
u'_account_id': 123,
u'email': u'[email protected]',
u'name': u'Tricium'
},
u'date': u'2017-03-16 20:00:41.000000000',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d1234',
u'message': u'(1 comment)',
u'tag': u'autogenerated:tricium',
},
{
u'_revision_number': 2,
u'author': {
u'_account_id': 123,
u'email': u'[email protected]',
u'name': u'reviewer'
},
u'date': u'2017-03-17 05:30:37.000000000',
u'tag': u'autogenerated:tricium',
u'id': u'f5a6c25ecbd3b3b54a43ae418ed97eff046d4568',
u'message': u'(1 comment)',
},
]
}),
(('GetChangeComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {}),
(('GetChangeRobotComments', 'chromium-review.googlesource.com',
'infra%2Finfra~1'), {
'codereview.settings': [
{
u'author': {u'email': u'[email protected]'},
u'updated': u'2017-03-17 05:30:37.000000000',
u'robot_run_id': u'5565031076855808',
u'robot_id': u'Linter/Category',
u'tag': u'autogenerated:tricium',
u'patch_set': 2,
u'side': u'REVISION',
u'message': u'Linter warning message text',
u'line': 32,
},
],
}),
((['git', 'config', 'branch.foo.gerritpatchset', '2'],), ''),
]
expected_comments_summary = [
git_cl._CommentSummary(date=datetime.datetime(2017, 3, 17, 5, 30, 37),
message=(
u'(1 comment)\n\ncodereview.settings\n'
u' PS2, Line 32: https://chromium-review.googlesource.com/'
u'c/1/2/codereview.settings#32\n'
u' Linter warning message text\n'),
sender=u'[email protected]',
autogenerated=True, approval=False, disapproval=False)
]
cl = git_cl.Changelist(
codereview='gerrit', issue=1, branchref='refs/heads/foo')
self.assertEqual(cl.GetCommentsSummary(), expected_comments_summary)
def test_get_remote_url_with_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-exists':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
url = 'https://chromium.googlesource.com/my/repo'
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-exists'),
(('os.path.isdir', '/cache/this-dir-exists'),
True),
# Runs in /cache/this-dir-exists.
((['git', 'config', 'remote.origin.url'],),
url),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertEqual(cl.GetRemoteUrl(), url)
self.assertEqual(cl.GetRemoteUrl(), url) # Must be cached.
def test_get_remote_url_non_existing_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-doesnt-exist':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
self.mock(logging, 'error',
lambda fmt, *a: self._mocked_call('logging.error', fmt % a))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-doesnt-exist'),
(('os.path.isdir', '/cache/this-dir-doesnt-exist'),
False),
(('logging.error',
'Remote "origin" for branch "/cache/this-dir-doesnt-exist" points to'
' "master", but it doesn\'t exist.'), None),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertIsNone(cl.GetRemoteUrl())
def test_get_remote_url_misconfigured_mirror(self):
original_os_path_isdir = os.path.isdir
def selective_os_path_isdir_mock(path):
if path == '/cache/this-dir-exists':
return self._mocked_call('os.path.isdir', path)
return original_os_path_isdir(path)
self.mock(os.path, 'isdir', selective_os_path_isdir_mock)
self.mock(logging, 'error',
lambda *a: self._mocked_call('logging.error', *a))
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'/cache/this-dir-exists'),
(('os.path.isdir', '/cache/this-dir-exists'), True),
# Runs in /cache/this-dir-exists.
((['git', 'config', 'remote.origin.url'],), ''),
(('logging.error',
'Remote "%(remote)s" for branch "%(branch)s" points to '
'"%(cache_path)s", but it is misconfigured.\n'
'"%(cache_path)s" must be a git repo and must have a remote named '
'"%(remote)s" pointing to the git host.', {
'remote': 'origin',
'cache_path': '/cache/this-dir-exists',
'branch': 'master'}
), None),
]
cl = git_cl.Changelist(codereview='gerrit', issue=1)
self.assertIsNone(cl.GetRemoteUrl())
def test_gerrit_change_identifier_with_project(self):
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],),
'https://chromium.googlesource.com/a/my/repo.git/'),
]
cl = git_cl.Changelist(codereview='gerrit', issue=123456)
self.assertEqual(cl._GerritChangeIdentifier(), 'my%2Frepo~123456')
def test_gerrit_change_identifier_without_project(self):
self.calls = [
((['git', 'symbolic-ref', 'HEAD'],), 'master'),
((['git', 'config', 'branch.master.merge'],), 'master'),
((['git', 'config', 'branch.master.remote'],), 'origin'),
((['git', 'config', 'remote.origin.url'],), CERR1),
]
cl = git_cl.Changelist(codereview='gerrit', issue=123456)
self.assertEqual(cl._GerritChangeIdentifier(), '123456')
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
unittest.main()
| self._patch_common(
git_short_host='chromium', detect_gerrit_server=True, new_branch=True)
self.calls += [
((['git', 'fetch', 'https://chromium.googlesource.com/my/repo',
'refs/changes/56/123456/7'],), ''),
((['git', 'cherry-pick', 'FETCH_HEAD'],), ''),
((['git', 'config', 'branch.master.gerritissue', '123456'],),
''),
((['git', 'config', 'branch.master.gerritserver',
'https://chromium-review.googlesource.com'],), ''),
((['git', 'config', 'branch.master.gerritpatchset', '7'],), ''),
((['git', 'rev-parse', 'FETCH_HEAD'],), 'deadbeef'),
((['git', 'config', 'branch.master.last-upload-hash', 'deadbeef'],), ''),
((['git', 'config', 'branch.master.gerritsquashhash', 'deadbeef'],), ''),
]
self.assertEqual(git_cl.main(['patch', '-b', 'master', '123456']), 0) |
longest common subsequence.py | """
LCS Problem Statement: Given two sequences, find the length of the longest subsequence present in both of them.
A subsequence is a sequence that appears in the same relative order, but is not necessarily contiguous.
Example: "abc" and "abg" are subsequences of "abcdefgh".
"""
def LCS(s1, s2):
| m = len(s1)
n = len(s2)
    arr = [[0 for i in range(n+1)] for j in range(m+1)]
    for i in range(1, m+1):
        for j in range(1, n+1):
if s1[i-1] == s2[j-1]:
arr[i][j] = arr[i-1][j-1]+1
else:
arr[i][j] = max(arr[i-1][j], arr[i][j-1])
return arr[m][n] |
|
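# A minimal usage sketch (not part of the original file), exercising LCS on the
# docstring's example strings plus a classic textbook pair:
if __name__ == "__main__":
    assert LCS("abc", "abcdefgh") == 3   # "abc" appears in order
    assert LCS("abg", "abcdefgh") == 3   # so does "abg"
    print(LCS("AGGTAB", "GXTXAYB"))      # 4, via "GTAB"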
.eslintrc.js | module.exports = {
parser: '@typescript-eslint/parser', // Specifies the ESLint parser
extends: [
'plugin:react/recommended', // Uses the recommended rules from @eslint-plugin-react
'plugin:@typescript-eslint/recommended', // Uses the recommended rules from the @typescript-eslint/eslint-plugin
'prettier/@typescript-eslint', // Uses eslint-config-prettier to disable ESLint rules from @typescript-eslint/eslint-plugin that would conflict with prettier
    'plugin:prettier/recommended', // Enables eslint-plugin-prettier and eslint-config-prettier, displaying prettier errors as ESLint errors. Make sure this is always the last configuration in the extends array.
],
parserOptions: { | ecmaVersion: 2018, // Allows for the parsing of modern ECMAScript features
sourceType: 'module', // Allows for the use of imports
ecmaFeatures: {
jsx: true, // Allows for the parsing of JSX
},
},
rules: {
// Place to specify ESLint rules. Can be used to overwrite rules specified from the extended configs
'prettier/prettier': 'error',
// e.g. "@typescript-eslint/explicit-function-return-type": "off",
},
settings: {
react: {
version: 'detect', // Tells eslint-plugin-react to automatically detect the version of React to use
},
},
}; | |
bench.rs | use solana_metrics;
use bincode;
use log::*;
use rayon::prelude::*;
use solana::gen_keys::GenKeys;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_drone::drone::request_airdrop_transaction;
use solana_librapay_api::{create_genesis, upload_mint_program, upload_payment_program};
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_sdk::client::Client;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
use solana_sdk::timing::timestamp;
use solana_sdk::timing::{duration_as_ms, duration_as_s};
use solana_sdk::transaction::Transaction;
use std::cmp;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::time::Duration;
use std::time::Instant;
use solana_librapay_api::librapay_transaction;
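// Fan-out limit: a single funding transaction spends one source key into at
// most this many destination accounts.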
pub const MAX_SPENDS_PER_TX: u64 = 4;
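// Assumed per-account funding level; paces direction switches in the transfer
// loop (see should_switch_directions).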
pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 128;
#[derive(Debug)]
pub enum BenchTpsError {
AirdropFailure,
}
pub type Result<T> = std::result::Result<T, BenchTpsError>;
pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
pub struct Config {
pub id: Keypair,
pub threads: usize,
pub thread_batch_sleep_ms: usize,
pub duration: Duration,
pub tx_count: usize,
pub sustained: bool,
pub use_move: bool,
}
impl Default for Config {
fn default() -> Self {
Self {
id: Keypair::new(),
threads: 4,
thread_batch_sleep_ms: 0,
duration: Duration::new(std::u64::MAX, 0),
tx_count: 500_000,
sustained: false,
use_move: false,
}
}
}
type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
pub fn do_bench_tps<T>(
clients: Vec<T>,
config: Config,
gen_keypairs: Vec<Keypair>,
keypair0_balance: u64,
libra_args: Option<LibraKeys>,
) -> u64
where
T: 'static + Client + Send + Sync,
{
let Config {
id,
threads,
thread_batch_sleep_ms,
duration,
tx_count,
sustained,
..
} = config;
let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
let client = &clients[0];
let start = gen_keypairs.len() - (tx_count * 2) as usize;
let keypairs = &gen_keypairs[start..];
let first_tx_count = client.get_transaction_count().expect("transaction count");
println!("Initial transaction count {}", first_tx_count);
let exit_signal = Arc::new(AtomicBool::new(false));
// Setup a thread per validator to sample every period
// collect the max transaction rate and total tx count seen
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
println!("Sampling TPS every {} second...", sample_period);
let v_threads: Vec<_> = clients
.iter()
.map(|client| {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_txs(&exit_signal, &maxes, sample_period, &client);
})
.unwrap()
})
.collect();
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
let total_tx_sent_count = Arc::new(AtomicUsize::new(0));
let s_threads: Vec<_> = (0..threads)
.map(|_| {
let exit_signal = exit_signal.clone();
let shared_txs = shared_txs.clone();
let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
let total_tx_sent_count = total_tx_sent_count.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sender".to_string())
.spawn(move || {
do_tx_transfers(
&exit_signal,
&shared_txs,
&shared_tx_active_thread_count,
&total_tx_sent_count,
thread_batch_sleep_ms,
&client,
);
})
.unwrap()
})
.collect();
// generate and send transactions for the specified duration
let start = Instant::now();
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
let mut blockhash = Hash::default();
let mut blockhash_time = Instant::now();
while start.elapsed() < duration {
// ping-pong between source and destination accounts for each loop iteration
// this seems to be faster than trying to determine the balance of individual
// accounts
let len = tx_count as usize;
if let Ok((new_blockhash, _fee_calculator)) = client.get_new_blockhash(&blockhash) {
blockhash = new_blockhash;
} else {
if blockhash_time.elapsed().as_secs() > 30 {
panic!("Blockhash is not updating");
}
sleep(Duration::from_millis(100));
continue;
}
blockhash_time = Instant::now();
let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
generate_txs(
&shared_txs,
&blockhash,
&keypairs[..len],
&keypairs[len..],
threads,
reclaim_lamports_back_to_source_account,
&libra_args,
);
// In sustained mode overlap the transfers with generation
// this has higher average performance but lower peak performance
// in tested environments.
if !sustained {
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
sleep(Duration::from_millis(1));
}
}
i += 1;
if should_switch_directions(NUM_LAMPORTS_PER_ACCOUNT, i) {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}
    // Stop the sampling threads so they can collect the stats
exit_signal.store(true, Ordering::Relaxed);
println!("Waiting for validator threads...");
for t in v_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
// join the tx send threads
println!("Waiting for transmit threads...");
for t in s_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
compute_and_report_stats(
&maxes,
sample_period,
&start.elapsed(),
total_tx_sent_count.load(Ordering::Relaxed),
);
let r_maxes = maxes.read().unwrap();
r_maxes.first().unwrap().1.txs
}
fn metrics_submit_lamport_balance(lamport_balance: u64) {
println!("Token balance: {}", lamport_balance);
datapoint_info!(
"bench-tps-lamport_balance",
("balance", lamport_balance, i64)
);
}
fn generate_move_txs(
source: &[Keypair],
dest: &[Keypair],
reclaim: bool,
move_keypairs: &[Keypair],
libra_pay_program_id: &Pubkey,
libra_mint_id: &Pubkey,
blockhash: &Hash,
) -> Vec<(Transaction, u64)> {
let count = move_keypairs.len() / 2;
let source_move = &move_keypairs[..count];
let dest_move = &move_keypairs[count..];
let pairs: Vec<_> = if !reclaim {
source_move
.iter()
.zip(dest_move.iter())
.zip(source.iter())
.collect()
} else {
dest_move
.iter()
.zip(source_move.iter())
.zip(dest.iter())
.collect()
};
pairs
.par_iter()
.map(|((from, to), payer)| {
(
librapay_transaction::transfer(
libra_pay_program_id,
libra_mint_id,
&payer,
&from,
&to.pubkey(),
1,
*blockhash,
),
timestamp(),
)
})
.collect()
}
fn generate_system_txs(
source: &[Keypair],
dest: &[Keypair],
reclaim: bool,
blockhash: &Hash,
) -> Vec<(Transaction, u64)> {
let pairs: Vec<_> = if !reclaim {
source.iter().zip(dest.iter()).collect()
} else {
dest.iter().zip(source.iter()).collect()
};
pairs
.par_iter()
.map(|(from, to)| {
(
system_transaction::create_user_account(from, &to.pubkey(), 1, *blockhash),
timestamp(),
)
})
.collect()
}
fn generate_txs(
shared_txs: &SharedTransactions,
blockhash: &Hash,
source: &[Keypair],
dest: &[Keypair],
threads: usize,
reclaim: bool,
libra_args: &Option<LibraKeys>,
) {
let tx_count = source.len();
println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
let signing_start = Instant::now();
let transactions = if let Some((
libra_genesis_keypair,
libra_pay_program_id,
_libra_mint_program_id,
libra_keys,
)) = libra_args
{
generate_move_txs(
source,
dest,
reclaim,
&libra_keys,
libra_pay_program_id,
&libra_genesis_keypair.pubkey(),
blockhash,
)
} else {
generate_system_txs(source, dest, reclaim, blockhash)
};
let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = (tx_count) as f64 / ns as f64;
let nsps = ns as f64 / (tx_count) as f64;
println!(
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time, {}",
bsps * 1_000_000_f64,
nsps / 1_000_f64,
duration_as_ms(&duration),
blockhash,
);
datapoint_info!(
"bench-tps-generate_txs",
("duration", duration_as_ms(&duration), i64)
);
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
{
let mut shared_txs_wl = shared_txs.write().unwrap();
for chunk in chunks {
shared_txs_wl.push_back(chunk.to_vec());
}
}
}
fn do_tx_transfers<T: Client>(
exit_signal: &Arc<AtomicBool>,
shared_txs: &SharedTransactions,
shared_tx_thread_count: &Arc<AtomicIsize>,
total_tx_sent_count: &Arc<AtomicUsize>,
thread_batch_sleep_ms: usize,
client: &Arc<T>,
) {
loop {
if thread_batch_sleep_ms > 0 {
sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
}
let txs;
{
let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
txs = shared_txs_wl.pop_front();
}
if let Some(txs0) = txs {
shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
println!(
"Transferring 1 unit {} times... to {}",
txs0.len(),
client.as_ref().transactions_addr(),
);
let tx_len = txs0.len();
let transfer_start = Instant::now();
for tx in txs0 {
let now = timestamp();
if now > tx.1 && now - tx.1 > 1000 * 30 {
continue;
}
client
.async_send_transaction(tx.0)
.expect("async_send_transaction in do_tx_transfers");
}
shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
total_tx_sent_count.fetch_add(tx_len, Ordering::Relaxed);
println!(
"Tx send done. {} ms {} tps",
duration_as_ms(&transfer_start.elapsed()),
tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
);
datapoint_info!(
"bench-tps-do_tx_transfers",
("duration", duration_as_ms(&transfer_start.elapsed()), i64),
("count", tx_len, i64)
);
}
if exit_signal.load(Ordering::Relaxed) {
break;
}
}
}
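// Returns true if any account in `tx` other than the payer already shows a
// balance of at least `amount`, i.e. the funding transfer appears to have landed.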
fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
for a in &tx.message().account_keys[1..] {
if client.get_balance(a).unwrap_or(0) >= amount {
return true;
}
}
false
}
/// Fund the dest keys by spending each source key into at most MAX_SPENDS_PER_TX
/// destinations on every iteration. This allows us to replay the transfers because
/// each source account ends up either empty or full.
pub fn fund_keys<T: Client>(
client: &T,
source: &Keypair,
dests: &[Keypair],
total: u64,
max_fee: u64,
mut extra: u64,
) {
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
println!("funding keys {}", dests.len());
while !notfunded.is_empty() {
let mut new_funded: Vec<(&Keypair, u64)> = vec![];
let mut to_fund = vec![];
println!("creating from... {}", funded.len());
for f in &mut funded {
let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
if max_units == 0 {
break;
}
let start = notfunded.len() - max_units as usize;
let fees = if extra > 0 { max_fee } else { 0 };
let per_unit = (f.1 - lamports_per_account - fees) / max_units;
let moves: Vec<_> = notfunded[start..]
.iter()
.map(|k| (k.pubkey(), per_unit))
.collect();
notfunded[start..]
.iter()
.for_each(|k| new_funded.push((k, per_unit)));
notfunded.truncate(start);
if !moves.is_empty() {
to_fund.push((f.0, moves));
}
extra -= 1;
}
// try to transfer a "few" at a time with recent blockhash
// assume 4MB network buffers, and 512 byte packets
const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;
to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
let mut tries = 0;
// this set of transactions just initializes us for bookkeeping
#[allow(clippy::clone_double_ref)] // sigh
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let tx = Transaction::new_unsigned_instructions(
system_instruction::transfer_many(&k.pubkey(), &m),
);
(k.clone(), tx)
})
.collect();
let amount = chunk[0].1[0].1;
while !to_fund_txs.is_empty() {
let receivers = to_fund_txs
.iter()
.fold(0, |len, (_, tx)| len + tx.message().instructions.len());
println!(
"{} {} to {} in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
amount,
receivers,
to_fund_txs.len(),
);
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
// re-sign retained to_fund_txes with updated blockhash
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
to_fund_txs.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
            // Retry anything that seems to have dropped through the cracks.
            // Since these txs are all-or-nothing, they're safe to retry.
for _ in 0..10 {
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
if to_fund_txs.is_empty() {
break;
}
sleep(Duration::from_millis(100));
}
tries += 1;
}
println!("transferred");
});
println!("funded: {} left: {}", new_funded.len(), notfunded.len());
funded = new_funded;
}
}
pub fn airdrop_lamports<T: Client>(
client: &T,
drone_addr: &SocketAddr,
id: &Keypair,
tx_count: u64,
) -> Result<()> {
let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(starting_balance);
println!("starting balance {}", starting_balance);
if starting_balance < tx_count {
let airdrop_amount = tx_count - starting_balance;
println!(
"Airdropping {:?} lamports from {} for {}",
airdrop_amount,
drone_addr,
id.pubkey(),
);
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
match request_airdrop_transaction(&drone_addr, &id.pubkey(), airdrop_amount, blockhash) {
Ok(transaction) => {
let signature = client.async_send_transaction(transaction).unwrap();
client
.poll_for_signature_confirmation(&signature, 1)
.unwrap_or_else(|_| {
panic!(
"Error requesting airdrop: to addr: {:?} amount: {}",
drone_addr, airdrop_amount
)
})
}
Err(err) => {
panic!(
"Error requesting airdrop: {:?} to addr: {:?} amount: {}",
err, drone_addr, airdrop_amount
);
}
};
let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| {
println!("airdrop error {}", e);
starting_balance
});
println!("current balance {}...", current_balance);
metrics_submit_lamport_balance(current_balance);
if current_balance - starting_balance != airdrop_amount {
println!(
"Airdrop failed! {} {} {}",
id.pubkey(),
current_balance,
starting_balance
);
return Err(BenchTpsError::AirdropFailure);
}
}
Ok(())
}
fn compute_and_report_stats(
maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
sample_period: u64,
tx_send_elapsed: &Duration,
total_tx_send_count: usize,
) {
// Compute/report stats
let mut max_of_maxes = 0.0;
let mut max_tx_count = 0;
let mut nodes_with_zero_tps = 0;
let mut total_maxes = 0.0;
println!(" Node address | Max TPS | Total Transactions");
println!("---------------------+---------------+--------------------");
for (sock, stats) in maxes.read().unwrap().iter() {
let maybe_flag = match stats.txs {
0 => "!!!!!",
_ => "",
};
println!(
"{:20} | {:13.2} | {} {}",
sock, stats.tps, stats.txs, maybe_flag
);
if stats.tps == 0.0 {
nodes_with_zero_tps += 1;
}
total_maxes += stats.tps;
if stats.tps > max_of_maxes {
max_of_maxes = stats.tps;
}
if stats.txs > max_tx_count {
max_tx_count = stats.txs;
}
}
if total_maxes > 0.0 {
let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps;
let average_max = total_maxes / num_nodes_with_tps as f32;
println!(
"\nAverage max TPS: {:.2}, {} nodes had 0 TPS",
average_max, nodes_with_zero_tps
);
}
let total_tx_send_count = total_tx_send_count as u64;
let drop_rate = if total_tx_send_count > max_tx_count {
(total_tx_send_count - max_tx_count) as f64 / total_tx_send_count as f64
} else {
0.0
};
println!(
"\nHighest TPS: {:.2} sampling period {}s max transactions: {} clients: {} drop rate: {:.2}",
max_of_maxes,
sample_period,
max_tx_count,
maxes.read().unwrap().len(),
drop_rate,
);
println!(
"\tAverage TPS: {}",
max_tx_count as f32 / duration_as_s(tx_send_elapsed)
);
}
// First transfer 3/4 of the lamports to the dest accounts,
// then ping-pong 1/4 of the lamports back to the source accounts.
// This leaves a buffer of 1/4 of the lamports in each account.
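// e.g. with num_lamports_per_account = 20, this returns true at i = 15, 20, 25, ...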
fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
}
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
let mut rnd = GenKeys::new(seed);
let mut total_keys = 0;
let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded
let mut delta = 1;
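    // e.g. count = 50 with MAX_SPENDS_PER_TX = 4 walks delta through 1, 4, 16, 64,
    // giving total_keys = 4 + 16 + 64 = 84 and extra = 1 + 4 + 16 = 21.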
while total_keys < count {
extra += delta;
delta *= MAX_SPENDS_PER_TX;
total_keys += delta;
}
(rnd.gen_n_keypairs(total_keys), extra)
}
fn fund_move_keys<T: Client>(
client: &T,
funding_key: &Keypair,
keypairs: &[Keypair],
total: u64,
libra_pay_program_id: &Pubkey,
libra_mint_program_id: &Pubkey,
libra_mint_key: &Keypair,
) {
let (mut blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
info!("creating the libra funding account..");
let libra_funding_key = Keypair::new();
let tx = librapay_transaction::create_account(
funding_key,
&libra_funding_key.pubkey(),
1,
blockhash,
);
let sig = client
.async_send_transaction(tx)
.expect("create_account in generate_and_fund_keypairs");
client.poll_for_signature(&sig).unwrap();
info!("minting to funding keypair");
let tx = librapay_transaction::mint_tokens(
&libra_mint_program_id,
funding_key,
libra_mint_key,
&libra_funding_key.pubkey(),
total,
blockhash,
);
let sig = client
.async_send_transaction(tx)
.expect("create_account in generate_and_fund_keypairs");
client.poll_for_signature(&sig).unwrap();
info!("creating move accounts.. {}", keypairs.len());
let create_len = 8;
let mut funding_time = Measure::start("funding_time");
for (i, keys) in keypairs.chunks(create_len).enumerate() {
if client.get_balance(&keys[0].pubkey()).unwrap_or(0) > 0 {
// already created these accounts.
break;
}
        let mut tx_send = Measure::start("tx_send");
let pubkeys: Vec<_> = keys.iter().map(|k| k.pubkey()).collect();
let tx = librapay_transaction::create_accounts(funding_key, &pubkeys, 1, blockhash);
let ser_size = bincode::serialized_size(&tx).unwrap();
let sig = client
.async_send_transaction(tx)
.expect("create_account in generate_and_fund_keypairs");
tx_send.stop();
let mut poll = Measure::start("poll");
client.poll_for_signature(&sig).unwrap();
poll.stop();
if i % 10 == 0 {
blockhash = client.get_recent_blockhash().unwrap().0;
info!(
"size: {} created {} accounts of {} sig: {}us send: {}us",
ser_size,
i,
(keypairs.len() / create_len),
poll.as_us(),
tx_send.as_us()
);
}
}
funding_time.stop();
info!("funding accounts {}ms", funding_time.as_ms());
let mut sigs = vec![];
let tx_count = keypairs.len();
let amount = total / (tx_count as u64);
for (i, key) in keypairs[..tx_count].iter().enumerate() {
let tx = librapay_transaction::transfer(
libra_pay_program_id,
&libra_mint_key.pubkey(),
funding_key,
&libra_funding_key,
&key.pubkey(),
amount,
blockhash,
);
let sig = client
.async_send_transaction(tx.clone())
.expect("create_account in generate_and_fund_keypairs");
let mut poll_time = Measure::start("poll_start");
let poll_status = client.poll_for_signature(&sig);
poll_time.stop();
info!(
"i: {} poll: {:?} time: {}ms",
i,
poll_status,
poll_time.as_ms()
);
sigs.push((sig, key));
if i % 50 == 0 {
blockhash = client.get_recent_blockhash().unwrap().0;
}
}
for (i, (sig, key)) in sigs.iter().enumerate() {
let mut times = 0;
loop {
match client.poll_for_signature(&sig) {
Ok(_) => {
break;
}
Err(e) => {
info!("e :{:?} waiting times: {} sig: {}", e, times, sig);
times += 1;
sleep(Duration::from_secs(1));
}
}
}
times = 0;
loop {
let balance = librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
if balance < amount {
info!("i: {} balance: {} times: {}", i, balance, times);
times += 1;
sleep(Duration::from_secs(1));
} else { | }
}
if i % 10 == 0 {
info!("funding {} of {}", i, tx_count);
}
}
info!("done..");
}
pub fn generate_and_fund_keypairs<T: Client>(
client: &T,
drone_addr: Option<SocketAddr>,
funding_key: &Keypair,
tx_count: usize,
lamports_per_account: u64,
use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
info!("Creating {} keypairs...", tx_count * 2);
let (mut keypairs, extra) = generate_keypairs(funding_key, tx_count as u64 * 2);
info!("Get lamports...");
    // Sample the last keypair; if it already holds lamports, resume from a prior run.
    // This logic prevents lamport loss on repeated solana-bench-tps executions.
let last_keypair_balance = client
.get_balance(&keypairs[tx_count * 2 - 1].pubkey())
.unwrap_or(0);
let mut move_keypairs_ret = None;
if lamports_per_account > last_keypair_balance {
let (_blockhash, fee_calculator) = client.get_recent_blockhash().unwrap();
let account_desired_balance =
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
let extra_fees = extra * fee_calculator.max_lamports_per_signature;
let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
if use_move {
total *= 2;
}
println!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
account_desired_balance, total
);
if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client, &drone_addr.unwrap(), funding_key, total)?;
}
if use_move {
let libra_genesis_keypair = create_genesis(&funding_key, client, 1_000_000);
let libra_mint_program_id = upload_mint_program(&funding_key, client);
let libra_pay_program_id = upload_payment_program(&funding_key, client);
// Generate another set of keypairs for move accounts.
// Still fund the solana ones which will be used for fees.
let seed = [0u8; 32];
let mut rnd = GenKeys::new(seed);
let move_keypairs = rnd.gen_n_keypairs(tx_count as u64 * 2);
fund_move_keys(
client,
funding_key,
&move_keypairs,
total / 2,
&libra_pay_program_id,
&libra_mint_program_id,
&libra_genesis_keypair,
);
move_keypairs_ret = Some((
libra_genesis_keypair,
libra_pay_program_id,
libra_mint_program_id,
move_keypairs,
));
// Give solana keys half and move keys half the lamports.
total /= 2;
}
fund_keys(
client,
funding_key,
&keypairs,
total,
fee_calculator.max_lamports_per_signature,
extra,
);
}
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
keypairs.truncate(2 * tx_count);
Ok((keypairs, move_keypairs_ret, last_keypair_balance))
}
#[cfg(test)]
mod tests {
use super::*;
use solana::cluster_info::FULLNODE_PORT_RANGE;
use solana::local_cluster::{ClusterConfig, LocalCluster};
use solana::validator::ValidatorConfig;
use solana_client::thin_client::create_client;
use solana_drone::drone::run_local_drone;
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_block::create_genesis_block;
use std::sync::mpsc::channel;
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(20, 0), false);
assert_eq!(should_switch_directions(20, 1), false);
assert_eq!(should_switch_directions(20, 14), false);
assert_eq!(should_switch_directions(20, 15), true);
assert_eq!(should_switch_directions(20, 16), false);
assert_eq!(should_switch_directions(20, 19), false);
assert_eq!(should_switch_directions(20, 20), true);
assert_eq!(should_switch_directions(20, 21), false);
assert_eq!(should_switch_directions(20, 99), false);
assert_eq!(should_switch_directions(20, 100), true);
assert_eq!(should_switch_directions(20, 101), false);
}
fn test_bench_tps_local_cluster(config: Config) {
solana_logger::setup();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
native_instruction_processors: vec![solana_move_loader_program!()],
..ClusterConfig::default()
});
let drone_keypair = Keypair::new();
cluster.transfer(
&cluster.funding_keypair,
&drone_keypair.pubkey(),
100_000_000,
);
let client = create_client(
(cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
FULLNODE_PORT_RANGE,
);
let (addr_sender, addr_receiver) = channel();
run_local_drone(drone_keypair, addr_sender, None);
let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
let lamports_per_account = 100;
let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
&client,
Some(drone_addr),
&config.id,
config.tx_count,
lamports_per_account,
config.use_move,
)
.unwrap();
let total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
assert!(total > 100);
}
#[test]
fn test_bench_tps_local_cluster_solana() {
let mut config = Config::default();
config.tx_count = 100;
config.duration = Duration::from_secs(10);
test_bench_tps_local_cluster(config);
}
#[test]
fn test_bench_tps_local_cluster_move() {
let mut config = Config::default();
config.tx_count = 100;
config.duration = Duration::from_secs(20);
config.use_move = true;
test_bench_tps_local_cluster(config);
}
#[test]
fn test_bench_tps_bank_client() {
let (genesis_block, id) = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let clients = vec![BankClient::new(bank)];
let mut config = Config::default();
config.id = id;
config.tx_count = 10;
config.duration = Duration::from_secs(5);
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20, false)
.unwrap();
do_bench_tps(clients, config, keypairs, 0, None);
}
#[test]
fn test_bench_tps_fund_keys() {
let (genesis_block, id) = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let client = BankClient::new(bank);
let tx_count = 10;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
for kp in &keypairs {
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
}
}
#[test]
fn test_bench_tps_fund_keys_with_fees() {
let (mut genesis_block, id) = create_genesis_block(10_000);
let fee_calculator = FeeCalculator::new(11);
genesis_block.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_block);
let client = BankClient::new(bank);
let tx_count = 10;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
let max_fee = client
.get_recent_blockhash()
.unwrap()
.1
.max_lamports_per_signature;
for kp in &keypairs {
assert_eq!(
client.get_balance(&kp.pubkey()).unwrap(),
lamports + max_fee
);
}
}
} | break; |
shop.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: proto/shop/shop.proto
package com_example_srv_shop
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Request struct {
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Page int32 `protobuf:"varint,2,opt,name=page,proto3" json:"page,omitempty"`
PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
SearchKey string `protobuf:"bytes,4,opt,name=search_key,json=searchKey,proto3" json:"search_key,omitempty"`
Disable bool `protobuf:"varint,5,opt,name=disable,proto3" json:"disable,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) {
return fileDescriptor_846ffd47eaa9ea2a, []int{0}
}
func (m *Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Request.Unmarshal(m, b)
}
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Request.Marshal(b, m, deterministic)
}
func (m *Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_Request.Merge(m, src)
}
func (m *Request) XXX_Size() int {
return xxx_messageInfo_Request.Size(m)
}
func (m *Request) XXX_DiscardUnknown() {
xxx_messageInfo_Request.DiscardUnknown(m)
}
var xxx_messageInfo_Request proto.InternalMessageInfo
func (m *Request) GetId() uint32 {
if m != nil {
return m.Id
}
return 0
}
func (m *Request) GetPage() int32 {
if m != nil {
return m.Page
}
return 0
}
func (m *Request) GetPageSize() int32 {
if m != nil {
return m.PageSize
}
return 0
}
func (m *Request) GetSearchKey() string {
if m != nil {
return m.SearchKey
}
return ""
}
func (m *Request) GetDisable() bool {
if m != nil {
return m.Disable
}
return false
}
type Response struct {
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_846ffd47eaa9ea2a, []int{1}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Response.Unmarshal(m, b)
}
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
}
func (m *Response) XXX_Merge(src proto.Message) {
xxx_messageInfo_Response.Merge(m, src)
}
func (m *Response) XXX_Size() int {
return xxx_messageInfo_Response.Size(m)
}
func (m *Response) XXX_DiscardUnknown() {
xxx_messageInfo_Response.DiscardUnknown(m)
}
var xxx_messageInfo_Response proto.InternalMessageInfo
func (m *Response) GetId() uint32 {
if m != nil {
return m.Id
}
return 0
}
// Shop list
type ShopListItem struct {
Shops []*ShopDetail `protobuf:"bytes,1,rep,name=shops,proto3" json:"shops,omitempty"`
Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ShopListItem) Reset() { *m = ShopListItem{} }
func (m *ShopListItem) String() string { return proto.CompactTextString(m) }
func (*ShopListItem) ProtoMessage() {}
func (*ShopListItem) Descriptor() ([]byte, []int) {
return fileDescriptor_846ffd47eaa9ea2a, []int{2}
}
func (m *ShopListItem) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShopListItem.Unmarshal(m, b)
}
func (m *ShopListItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ShopListItem.Marshal(b, m, deterministic)
}
func (m *ShopListItem) XXX_Merge(src proto.Message) {
xxx_messageInfo_ShopListItem.Merge(m, src)
}
func (m *ShopListItem) XXX_Size() int {
return xxx_messageInfo_ShopListItem.Size(m)
}
func (m *ShopListItem) XXX_DiscardUnknown() {
xxx_messageInfo_ShopListItem.DiscardUnknown(m)
}
var xxx_messageInfo_ShopListItem proto.InternalMessageInfo
func (m *ShopListItem) GetShops() []*ShopDetail {
if m != nil {
return m.Shops
}
return nil
}
func (m *ShopListItem) GetCount() int32 {
if m != nil {
return m.Count
}
return 0
}
// Shop detail
type ShopDetail struct {
ShopName string `protobuf:"bytes,1,opt,name=shop_name,json=shopName,proto3" json:"shop_name,omitempty"`
BsNumber int32 `protobuf:"varint,2,opt,name=bs_number,json=bsNumber,proto3" json:"bs_number,omitempty"`
IsActive bool `protobuf:"varint,3,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"`
Addr string `protobuf:"bytes,4,opt,name=addr,proto3" json:"addr,omitempty"`
Lat float32 `protobuf:"fixed32,5,opt,name=lat,proto3" json:"lat,omitempty"`
Lon float32 `protobuf:"fixed32,6,opt,name=lon,proto3" json:"lon,omitempty"`
Phone string `protobuf:"bytes,7,opt,name=phone,proto3" json:"phone,omitempty"`
Industry string `protobuf:"bytes,8,opt,name=industry,proto3" json:"industry,omitempty"`
IndustryId uint32 `protobuf:"varint,9,opt,name=industry_id,json=industryId,proto3" json:"industry_id,omitempty"`
RunningPeriod []string `protobuf:"bytes,10,rep,name=running_period,json=runningPeriod,proto3" json:"running_period,omitempty"`
Md5List []string `protobuf:"bytes,11,rep,name=md5_list,json=md5List,proto3" json:"md5_list,omitempty"`
AppUserId uint32 `protobuf:"varint,12,opt,name=app_user_id,json=appUserId,proto3" json:"app_user_id,omitempty"`
RegionId uint32 `protobuf:"varint,13,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"`
Region string `protobuf:"bytes,14,opt,name=region,proto3" json:"region,omitempty"`
Id uint32 `protobuf:"varint,15,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ShopDetail) Reset() { *m = ShopDetail{} }
func (m *ShopDetail) String() string { return proto.CompactTextString(m) }
func (*ShopDetail) ProtoMessage() {}
func (*ShopDetail) Descriptor() ([]byte, []int) {
return fileDescriptor_846ffd47eaa9ea2a, []int{3}
}
func (m *ShopDetail) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShopDetail.Unmarshal(m, b)
}
func (m *ShopDetail) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ShopDetail.Marshal(b, m, deterministic)
}
func (m *ShopDetail) XXX_Merge(src proto.Message) {
xxx_messageInfo_ShopDetail.Merge(m, src)
}
func (m *ShopDetail) XXX_Size() int {
return xxx_messageInfo_ShopDetail.Size(m)
}
func (m *ShopDetail) XXX_DiscardUnknown() {
xxx_messageInfo_ShopDetail.DiscardUnknown(m)
}
var xxx_messageInfo_ShopDetail proto.InternalMessageInfo
func (m *ShopDetail) GetShopName() string {
if m != nil {
return m.ShopName
}
return ""
}
func (m *ShopDetail) GetBsNumber() int32 {
if m != nil {
return m.BsNumber
}
return 0
}
func (m *ShopDetail) GetIsActive() bool {
if m != nil {
return m.IsActive
}
return false
}
func (m *ShopDetail) GetAddr() string {
if m != nil {
return m.Addr
}
return ""
}
func (m *ShopDetail) GetLat() float32 {
if m != nil {
return m.Lat
}
return 0
}
func (m *ShopDetail) GetLon() float32 {
if m != nil {
return m.Lon
}
return 0
}
func (m *ShopDetail) GetPhone() string {
if m != nil {
return m.Phone
}
return ""
}
func (m *ShopDetail) GetIndustry() string {
if m != nil {
return m.Industry
}
return ""
}
func (m *ShopDetail) GetIndustryId() uint32 {
if m != nil {
return m.IndustryId
}
return 0
}
func (m *ShopDetail) GetRunningPeriod() []string {
if m != nil {
return m.RunningPeriod
}
return nil
}
func (m *ShopDetail) GetMd5List() []string {
if m != nil {
return m.Md5List
}
return nil
}
func (m *ShopDetail) GetAppUserId() uint32 {
if m != nil {
return m.AppUserId
}
return 0
}
func (m *ShopDetail) GetRegionId() uint32 {
if m != nil {
return m.RegionId
}
return 0
}
func (m *ShopDetail) GetRegion() string {
if m != nil {
return m.Region
}
return ""
}
func (m *ShopDetail) GetId() uint32 {
if m != nil {
return m.Id
}
return 0
}
func init() {
proto.RegisterType((*Request)(nil), "com.example.srv.shop.Request")
proto.RegisterType((*Response)(nil), "com.example.srv.shop.Response")
proto.RegisterType((*ShopListItem)(nil), "com.example.srv.shop.ShopListItem")
proto.RegisterType((*ShopDetail)(nil), "com.example.srv.shop.ShopDetail")
}
func init() { proto.Register | ptor_846ffd47eaa9ea2a = []byte{
// 542 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x51, 0x6b, 0xd4, 0x40,
0x10, 0x6e, 0xee, 0x7a, 0x97, 0x64, 0xae, 0xad, 0xb2, 0x54, 0x59, 0x4f, 0x5a, 0x43, 0x40, 0xc8,
0x53, 0x84, 0x4a, 0x7d, 0x57, 0x0b, 0x72, 0x5a, 0x8b, 0xa4, 0xf8, 0x26, 0x84, 0xbd, 0xdb, 0xe1,
0x6e, 0xf1, 0xb2, 0xbb, 0xee, 0x6e, 0x8a, 0xed, 0xab, 0xbf, 0xc1, 0x7f, 0xe5, 0x8f, 0x92, 0xdd,
0x24, 0x08, 0xd2, 0x72, 0x2f, 0x77, 0xf3, 0x7d, 0x33, 0x99, 0xfd, 0xe6, 0x9b, 0x6c, 0xe0, 0x89,
0x36, 0xca, 0xa9, 0x57, 0x76, 0xa3, 0x74, 0xf8, 0x29, 0x03, 0x26, 0xc7, 0x2b, 0xd5, 0x94, 0xf8,
0x93, 0x35, 0x7a, 0x8b, 0xa5, 0x35, 0x37, 0xa5, 0xcf, 0xe5, 0xbf, 0x22, 0x88, 0x2b, 0xfc, 0xd1,
0xa2, 0x75, 0xe4, 0x08, 0x46, 0x82, 0xd3, 0x28, 0x8b, 0x8a, 0xc3, 0x6a, 0x24, 0x38, 0x21, 0xb0,
0xaf, 0xd9, 0x1a, 0xe9, 0x28, 0x8b, 0x8a, 0x49, 0x15, 0x62, 0xf2, 0x1c, 0x52, 0xff, 0x5f, 0x5b,
0x71, 0x87, 0x74, 0x1c, 0x12, 0x89, 0x27, 0xae, 0xc5, 0x1d, 0x92, 0x13, 0x00, 0x8b, 0xcc, 0xac,
0x36, 0xf5, 0x77, 0xbc, 0xa5, 0xfb, 0x59, 0x54, 0xa4, 0x55, 0xda, 0x31, 0x9f, 0xf0, 0x96, 0x50,
0x88, 0xb9, 0xb0, 0x6c, 0xb9, 0x45, 0x3a, 0xc9, 0xa2, 0x22, 0xa9, 0x06, 0x98, 0xcf, 0x21, 0xa9,
0xd0, 0x6a, 0x25, 0x2d, 0xfe, 0xaf, 0x22, 0xff, 0x06, 0x07, 0xd7, 0x1b, 0xa5, 0x2f, 0x85, 0x75,
0x0b, 0x87, 0x0d, 0x79, 0x03, 0x13, 0xaf, 0xdc, 0xd2, 0x28, 0x1b, 0x17, 0xb3, 0xb3, 0xac, 0xbc,
0x6f, 0xae, 0xd2, 0x3f, 0x72, 0x81, 0x8e, 0x89, 0x6d, 0xd5, 0x95, 0x93, 0x63, 0x98, 0xac, 0x54,
0x2b, 0x5d, 0x3f, 0x4e, 0x07, 0xf2, 0xdf, 0x63, 0x80, 0x7f, 0xb5, 0x7e, 0x3c, 0x5f, 0x5d, 0x4b,
0xd6, 0x60, 0xd0, 0x90, 0x56, 0x89, 0x27, 0xae, 0x58, 0x13, 0x66, 0x5f, 0xda, 0x5a, 0xb6, 0xcd,
0x12, 0x4d, 0xdf, 0x25, 0x59, 0xda, 0xab, 0x80, 0x7d, 0x52, 0xd8, 0x9a, 0xad, 0x9c, 0xb8, 0xe9,
0x8c, 0x49, 0xaa, 0x44, 0xd8, 0xb7, 0x01, 0x7b, 0x27, 0x19, 0xe7, 0xa6, 0xb7, 0x24, 0xc4, 0xe4,
0x31, 0x8c, 0xb7, 0xcc, 0x05, 0x27, 0x46, 0x95, 0x0f, 0x03, 0xa3, 0x24, 0x9d, 0xf6, 0x8c, 0x92,
0x5e, 0xb3, 0xde, 0x28, 0x89, 0x34, 0x0e, 0x0f, 0x76, 0x80, 0xcc, 0x21, 0x11, 0x92, 0xb7, 0xd6,
0x99, 0x5b, 0x9a, 0x74, 0x1a, 0x07, 0x4c, 0x5e, 0xc0, 0x6c, 0x88, 0x6b, 0xc1, 0x69, 0x1a, 0x6c,
0x84, 0x81, 0x5a, 0x70, 0xf2, 0x12, 0x8e, 0x4c, 0x2b, 0xa5, 0x90, 0xeb, 0x5a, 0xa3, 0x11, 0x8a,
0x53, 0xc8, 0xc6, 0x45, 0x5a, 0x1d, 0xf6, 0xec, 0x97, 0x40, 0x92, 0x67, 0x90, 0x34, 0xfc, 0xbc,
0xde, 0x0a, 0xeb, 0xe8, 0x2c, 0x14, 0xc4, 0x0d, 0x3f, 0xf7, 0x4b, 0x20, 0xa7, 0x30, 0x63, 0x5a,
0xd7, 0xad, 0x45, 0xe3, 0x8f, 0x38, 0x08, 0x47, 0xa4, 0x4c, 0xeb, 0xaf, 0x16, 0xcd, 0x82, 0x7b,
0x27, 0x0c, 0xae, 0x85, 0x92, 0x3e, 0x7b, 0x18, 0xb2, 0x49, 0x47, 0x2c, 0x38, 0x79, 0x0a, 0xd3,
0x2e, 0xa6, 0x47, 0x41, 0x79, 0x8f, 0xfa, 0xad, 0x3f, 0x1a, 0xb6, 0x7e, 0xf6, 0x67, 0x04, 0xc4,
0xef, 0xe5, 0x33, 0x93, 0x6c, 0x8d, 0xe6, 0x1a, 0xcd, 0x8d, 0x58, 0x21, 0xb9, 0x84, 0xe9, 0x7b,
0x83, 0xcc, 0x21, 0xd9, 0xb9, 0xf7, 0xf9, 0xe9, 0xfd, 0x15, 0xc3, 0x8b, 0x96, 0xef, 0x91, 0x2b,
0x88, 0x3f, 0xa0, 0x0b, 0x43, 0x9d, 0x3c, 0x54, 0x1c, 0xae, 0xc6, 0x3c, 0x7f, 0xf8, 0xb4, 0xe1,
0xc5, 0xcc, 0xf7, 0xc8, 0x65, 0xe8, 0xf7, 0xce, 0xdb, 0xbc, 0xa3, 0xdf, 0x4e, 0xf5, 0xf9, 0x1e,
0xf9, 0x08, 0xf1, 0x45, 0x77, 0x3f, 0x76, 0x75, 0xdb, 0x39, 0xe9, 0x72, 0x1a, 0xbe, 0x01, 0xaf,
0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x17, 0xc4, 0xee, 0x8e, 0x1c, 0x04, 0x00, 0x00,
}
| File("proto/shop/shop.proto", fileDescriptor_846ffd47eaa9ea2a) }
var fileDescri |
file_lock_unix.go | // Copyright 2014 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd
package filelock
import (
"io"
"os"
"syscall"
)
// lockCloser hides all of an os.File's methods, except for Close.
type lockCloser struct {
f *os.File
}
func (l lockCloser) Close() error {
return l.f.Close()
}
func Lock(name string) (io.Closer, error) | {
f, err := os.Create(name)
if err != nil {
return nil, err
}
/*
Some people tell me FcntlFlock does not exist, so use flock here
*/
if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
f.Close()
return nil, err
}
// spec := syscall.Flock_t{
// Type: syscall.F_WRLCK,
// Whence: int16(os.SEEK_SET),
// Start: 0,
// Len: 0, // 0 means to lock the entire file.
// Pid: int32(os.Getpid()),
// }
// if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil {
// f.Close()
// return nil, err
// }
return lockCloser{f}, nil
} |
|
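// A hedged usage sketch (path and error handling assumed, not from the
// original file):
//
//	closer, err := filelock.Lock("/tmp/example.lock")
//	if err != nil {
//		return err // a second process holding the lock fails here with EWOULDBLOCK
//	}
//	defer closer.Close()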
label.rs | use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let influx_url = "http://localhost:8888";
let token = "some-token";
let client = influxdb2_client::Client::new(influx_url, token);
println!("{:?}", client.labels().await?);
println!("{:?}", client.labels_by_org("some-org_id").await?);
println!("{:?}", client.find_label("some-label_id").await?);
let mut properties = HashMap::new();
properties.insert("some-key".to_string(), "some-value".to_string());
println!(
"{:?}",
client
.create_label("some-org_id", "some-name", Some(properties))
.await?
);
println!(
"{:?}",
client
.update_label(Some("some-name".to_string()), None, "some-label_id")
.await?
);
println!("{:?}", client.delete_label("some-label_id").await?); | Ok(())
} |
|
Database.go | package database
import "gorm.io/gorm" | DBConn *gorm.DB
) |
var ( |
join_test.go | package hub_test
import (
job "github.com/AgentCoop/go-work"
"github.com/AgentCoop/peppermint/internal/app/node"
_ "github.com/AgentCoop/peppermint/internal/service/hub"
"github.com/AgentCoop/peppermint/internal/service/hub/grpc/client"
"github.com/AgentCoop/peppermint/internal/service/hub/grpc/client/join"
"os"
"testing"
"time"
)
var (
serverAddr = "localhost:9911"
)
func | (t *testing.T) {
os.Args = []string{"testapp", "bootstrap", "--create-db", "--force"}
app := node.NewApp()
<-app.Job().Run()
os.Args = []string{"testapp", "run", "--hub-port=12001", "-n=1"}
appJob := node.NewApp()
appJob.Job().Run()
time.Sleep(50 * time.Millisecond)
//addr, _ := net.ResolveTCPAddr("tcp", "localhost:9911")
//time.Sleep(time.Millisecond)
hubClient := client.NewClient("localhost")
clientJob := job.NewJob(hubClient)
joinCtx := join.NewJoinContext("secret", []string{"my-test-machine", "linux"})
clientJob.AddOneshotTask(hubClient.ConnectTask)
clientJob.AddTask(joinCtx.JoinTask)
<-clientJob.Run()
_, err := clientJob.GetInterruptedBy()
if err != nil {
t.Error(err)
}
}
| TestJoinHello |
grammar.py | from lark import Lark, Transformer
import operator
import os
class Condition:
def __init__(self):
filename = os.path.join(
os.path.dirname(__file__),
'grammars/condition.g'
)
with open(filename) as grammar_file:
self.parser = Lark(
grammar_file.read(),
start='or_test',
parser='lalr',
)
def parse(self, string):
''' returns the tree '''
return self.parser.parse(string)
class ConditionTransformer(Transformer):
''' can be used to transform a tree like this:
ConditionTransformer(values).transform(tree)
where values is taken from the state of the execution '''
def __init__(self, values):
self._values = values
def op_eq(self, _):
return operator.eq
def op_ne(self, _):
return operator.ne
def op_lt(self, _):
return operator.lt
def op_lte(self, _):
return operator.le
def op_gt(self, _):
return operator.gt
def op_gte(self, _):
return operator.ge
def op_or(self, _):
return operator.or_
def op_and(self, _):
return operator.and_
def op_not(self, _):
return operator.not_
def variable(self, tokens):
# just copy the token as string
return tokens[0][:]
def obj_id(self, tokens):
# copy the token as string
return tokens[0][:]
def ref(self, tokens):
obj_id, member = tokens
return self._values[obj_id][member]
def string(self, tokens):
return tokens[0][1:-1]
def number(self, tokens):
return float(tokens[0])
def test_aux(self, tokens):
if len(tokens) == 1:
|
if len(tokens) == 2:
op, right = tokens
return op(right)
left, op, right = tokens
return op(left, right)
def or_test(self, tokens):
return self.test_aux(tokens)
def and_test(self, tokens):
return self.test_aux(tokens)
def not_test(self, tokens):
return self.test_aux(tokens)
def comparison(self, tokens):
return self.test_aux(tokens)
def atom_expr(self, tokens):
return self.test_aux(tokens)
| return tokens[0] |
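# A hedged usage sketch (the grammar file grammars/condition.g is not shown,
# so the exact condition syntax below is an assumption):
#     cond = Condition()
#     tree = cond.parse('request.status == "approved"')
#     values = {'request': {'status': 'approved'}}
#     ConditionTransformer(values).transform(tree)  # -> True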
init.go | package grifts
import (
"github.com/arschles/go-in-5-minutes/episode24/actions"
"github.com/gobuffalo/buffalo" | )
func init() {
buffalo.Grifts(actions.App())
} | |
ocr_recognizer.py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="0" #CUDA_VISIBLE_DEVICES=0 (always use the first GPU only)
import time
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
from utils import AttnLabelConverter
from model import Model
from demo import detect_ocr
from craft.craft import CRAFT
from collections import OrderedDict
#####################################
# 21.06.04 Astrid
# https://github.com/googleapis/oauth2client/issues/642#issuecomment-279643203
'''
Solving this error
File "./../src/ocr_recog/ocr_recognizer.py", line 41, in __init__
self.opt_craft, self.opt_recog = self.setup_parser()
File "./../src/ocr_recog/ocr_recognizer.py", line 120, in setup_parser
parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')
File "/usr/lib/python3.6/argparse.py", line 1635, in __init__
prog = _os.path.basename(_sys.argv[0])
AttributeError: module 'sys' has no attribute 'argv'
'''
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
#####################################
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
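# Strips the leading "module." prefix that torch.nn.DataParallel adds to
# parameter names, so checkpoints saved from a wrapped model load into a
# plain (unwrapped) model.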
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
class OCRRecognizer:
def __init__(self):
self.net = None #detect
self.model = None #recog
self.converter = None
#self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.res_imagefileName = None
self.opt_craft, self.opt_recog = self.setup_parser()
self.args_craft= vars(self.opt_craft)
self.args = vars(self.opt_recog)
self.detect_time = 0.0
self.recog_time = 0.0
        self.total_time = 0.0
# print("~~~~~~~~ Hyperparameters used: ~~~~~~~")
# for x, y in self.args.items():
# print("{} : {}".format(x, y))
self.__dict__.update(self.args_craft)
self.__dict__.update(self.args)
def initialize(self):
|
def setup_parser(self):
"""
Sets up an argument parser
"""
parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')
parser_craft.add_argument('--craft_trained_model', default='weights/craft_mlt_25k.pth', type=str,
help='pretrained model')
parser_craft.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser_craft.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser_craft.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser_craft.add_argument('--cuda', default=False, type=str2bool, help='Use cuda for inference')
parser_craft.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser_craft.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser_craft.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
parser_craft.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser_craft.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')
parser_craft.add_argument('--result_folder', default='./results/', type=str, help='result folder path')
parser_craft.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
parser_craft.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str,
help='pretrained refiner model')
args_craft = parser_craft.parse_args()
parser_recog = argparse.ArgumentParser(description='ocr recognition')
parser_recog.add_argument('--image_path', help='path to image_folder or image_file which contains text images')
parser_recog.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser_recog.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser_recog.add_argument('--saved_model', help="path to saved_model to evaluation")
parser_recog.add_argument('--logfilepath', help="path to log to demo")
""" Data processing """
parser_recog.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser_recog.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser_recog.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser_recog.add_argument('--rgb', action='store_true', help='use rgb input')
# parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser_recog.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘',
help='character label')
parser_recog.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser_recog.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
""" Model Architecture """
parser_recog.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser_recog.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser_recog.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser_recog.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
args_recog= parser_recog.parse_args()
return args_craft , args_recog
def apply(self, image, timestamp, save_img=False):
#coordinate : list
save_log = False
pred, timestamp = detect_ocr(self, image, timestamp, save_img, save_log)
return pred, timestamp | start = time.time()
# self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_addKorean_synth/best_accuracy.pth'
# self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train/craft_mlt_25k.pth'
# self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy.pth'
# self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy_craft.pth'
#
# official
self.saved_model = './data_ocr/best_accuracy.pth'
self.craft_trained_model = './data_ocr/best_craft.pth'
self.logfilepath = './data_ocr/log_ocr_result.txt'
if torch.cuda.is_available():
self.device = torch.device('cuda')
self.cuda = True
cudnn.benchmark = False
else:
self.device = torch.device('cpu')
self.cuda = False
cudnn.benchmark = True
""" vocab / character number configuration """
# if self.sensitive:
# self.character = string.printable[:-6] # same with ASTER setting (use 94 char).
cudnn.deterministic = True
#self.num_gpu = torch.cuda.device_count()
""" model configuration """
# detetion
self.net = CRAFT(self).to(self.device) # initialize
print('Loading detection weights from checkpoint ' + self.craft_trained_model)
self.net.load_state_dict(copyStateDict(torch.load(self.craft_trained_model, map_location=self.device)))
#self.net = torch.nn.DataParallel(self.net).to(self.device)
self.net.to(self.device)
self.converter = AttnLabelConverter(self.character)
self.num_class = len(self.converter.character)
if self.rgb:
self.input_channel = 3
self.model = Model(self, self.num_class).to(self.device)
# load model
#self.model = torch.nn.DataParallel(self.model).to(self.device)
print('Loading recognition weights from checkpoint %s' % self.saved_model)
#ckpt = torch.load(self.saved_model, map_location=self.device)
self.model.load_state_dict(torch.load(self.saved_model, map_location=self.device))
self.model.to(self.device)
print('Initialization done! It took {:.2f} sec.\n'.format(time.time() - start))
return True |
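# A minimal sketch of the device-selection idiom used in the initializer above;
# torch/cudnn are the real libraries, the helper name is illustrative.
import torch
import torch.backends.cudnn as cudnn

def pick_device():
    # Prefer CUDA when present, fall back to CPU, and keep results reproducible.
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    cudnn.deterministic = True
    return device, use_cuda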
DeleteRegexMatchSetCommand.ts | import { ServiceInputTypes, ServiceOutputTypes, WAFRegionalClientResolvedConfig } from "../WAFRegionalClient.ts";
import { DeleteRegexMatchSetRequest, DeleteRegexMatchSetResponse } from "../models/models_0.ts";
import {
deserializeAws_json1_1DeleteRegexMatchSetCommand,
serializeAws_json1_1DeleteRegexMatchSetCommand,
} from "../protocols/Aws_json1_1.ts";
import { getSerdePlugin } from "../../middleware-serde/mod.ts";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "../../protocol-http/mod.ts";
import { Command as $Command } from "../../smithy-client/mod.ts";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "../../types/mod.ts";
export interface DeleteRegexMatchSetCommandInput extends DeleteRegexMatchSetRequest {}
export interface DeleteRegexMatchSetCommandOutput extends DeleteRegexMatchSetResponse, __MetadataBearer {}
/**
* <note>
* <p>This is <b>AWS WAF Classic</b> documentation. For
* more information, see <a href="https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html">AWS
* WAF Classic</a> in the developer guide.</p>
* <p>
* <b>For the latest version of AWS
* WAF</b>, use the AWS WAFV2 API and see the <a href="https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html">AWS WAF Developer Guide</a>. With the latest version, AWS WAF has a single set of endpoints for regional and global use. </p>
* </note>
* <p>Permanently deletes a <a>RegexMatchSet</a>. You can't delete a <code>RegexMatchSet</code> if it's still used in any <code>Rules</code>
* or if it still includes any <code>RegexMatchTuples</code> objects (any filters).</p>
* <p>If you just want to remove a <code>RegexMatchSet</code> from a <code>Rule</code>, use <a>UpdateRule</a>.</p>
* <p>To permanently delete a <code>RegexMatchSet</code>, perform the following steps:</p>
* <ol> | * </li>
* <li>
* <p>Use <a>GetChangeToken</a> to get the change token that you provide in the <code>ChangeToken</code> parameter of a
* <code>DeleteRegexMatchSet</code> request.</p>
* </li>
* <li>
* <p>Submit a <code>DeleteRegexMatchSet</code> request.</p>
* </li>
* </ol>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
* import { WAFRegionalClient, DeleteRegexMatchSetCommand } from "../../client-waf-regional/mod.ts";
* // const { WAFRegionalClient, DeleteRegexMatchSetCommand } = require("@aws-sdk/client-waf-regional"); // CommonJS import
* const client = new WAFRegionalClient(config);
* const command = new DeleteRegexMatchSetCommand(input);
* const response = await client.send(command);
* ```
*
* @see {@link DeleteRegexMatchSetCommandInput} for command's `input` shape.
* @see {@link DeleteRegexMatchSetCommandOutput} for command's `response` shape.
* @see {@link WAFRegionalClientResolvedConfig | config} for command's `input` shape.
*
*/
export class DeleteRegexMatchSetCommand extends $Command<
DeleteRegexMatchSetCommandInput,
DeleteRegexMatchSetCommandOutput,
WAFRegionalClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: DeleteRegexMatchSetCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: WAFRegionalClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<DeleteRegexMatchSetCommandInput, DeleteRegexMatchSetCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "WAFRegionalClient";
const commandName = "DeleteRegexMatchSetCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: DeleteRegexMatchSetRequest.filterSensitiveLog,
outputFilterSensitiveLog: DeleteRegexMatchSetResponse.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: DeleteRegexMatchSetCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_json1_1DeleteRegexMatchSetCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<DeleteRegexMatchSetCommandOutput> {
return deserializeAws_json1_1DeleteRegexMatchSetCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
} | * <li>
* <p>Update the <code>RegexMatchSet</code> to remove filters, if any. For more information, see <a>UpdateRegexMatchSet</a>.</p> |
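// A hedged sketch of the GetChangeToken -> DeleteRegexMatchSet sequence described
// in the doc comment above; GetChangeTokenCommand ships in the same client
// package, while the region and id values are illustrative (null-checks elided).
import { WAFRegionalClient, GetChangeTokenCommand } from "../../client-waf-regional/mod.ts";

async function deleteRegexMatchSet(regexMatchSetId: string) {
  const client = new WAFRegionalClient({ region: "us-east-1" });
  // Fetch a change token, then pass it to the delete request.
  const { ChangeToken } = await client.send(new GetChangeTokenCommand({}));
  return client.send(new DeleteRegexMatchSetCommand({ RegexMatchSetId: regexMatchSetId, ChangeToken }));
}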
basic_perf.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {ɵɵdefineComponent} from '../../src/render3/index';
import {ɵɵcontainer, ɵɵcontainerRefreshEnd, ɵɵcontainerRefreshStart, ɵɵelementEnd, ɵɵelementStart, ɵɵembeddedViewEnd, ɵɵembeddedViewStart, ɵɵtext} from '../../src/render3/instructions/all';
import {RenderFlags} from '../../src/render3/interfaces/definition';
import {document, renderComponent} from './render_util';
describe('iv perf test', () => {
const count = 100000;
const noOfIterations = 10;
describe('render', () => {
for (let iteration = 0; iteration < noOfIterations; iteration++) {
it(`${iteration}. create ${count} divs in DOM`, () => {
const start = new Date().getTime();
const container = document.createElement('div');
for (let i = 0; i < count; i++) {
const div = document.createElement('div');
div.appendChild(document.createTextNode('-'));
container.appendChild(div);
}
const end = new Date().getTime();
log(`${count} DIVs in DOM`, (end - start) / count);
});
it(`${iteration}. create ${count} divs in Render3`, () => {
class Component {
static ɵfac = () => new Component;
static ɵcmp = ɵɵdefineComponent({
type: Component,
selectors: [['div']],
decls: 1,
vars: 0,
template: function Template(rf: RenderFlags, ctx: any) {
if (rf & RenderFlags.Create) {
ɵɵcontainer(0);
}
if (rf & RenderFlags.Update) {
ɵɵcontainerRefreshStart(0);
{
for (let i = 0; i < count; i++) {
let rf0 = ɵɵembeddedViewStart(0, 2, 0);
{
if (rf0 & RenderFlags.Create) {
ɵɵelementStart(0, 'div');
ɵɵtext(1, '-');
ɵɵelementEnd();
}
}
ɵɵembeddedViewEnd();
}
}
ɵɵcontainerRefreshEnd();
}
}
});
}
const start = new Date().getTime();
renderComponent(Component);
const end = new Date().getTime();
log(`${count} DIVs in Render3`, (end - start) / count);
});
}
});
});
function log(text: string, duration: number) {
| / tslint:disable-next-line:no-console
console.log(text, duration * 1000, 'us'); // duration is ms per item; * 1000 gives microseconds
}
| / |
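// A hedged variant of the log() helper above using performance.now() for
// sub-millisecond resolution; assumes an environment exposing the performance API.
function logPrecise(text: string, count: number, fn: () => void) {
  const start = performance.now();
  fn();
  const perItemMs = (performance.now() - start) / count;
  console.log(text, perItemMs * 1000, 'us');  // ms per item * 1000 = microseconds
}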
diep0txfsts.rs | #[doc = "Reader of register DIEP0TXFSTS"]
pub type R = crate::R<u32, super::DIEP0TXFSTS>;
#[doc = "Reader of field `SPCAVAIL`"]
pub type SPCAVAIL_R = crate::R<u16, u16>;
impl R {
#[doc = "Bits 0:15 - TxFIFO Space Available"]
#[inline(always)]
pub fn spcavail(&self) -> SPCAVAIL_R |
}
| {
SPCAVAIL_R::new((self.bits & 0xffff) as u16)
} |
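// Hedged usage sketch for the reader above; the peripheral path is illustrative
// and depends on the generated PAC:
//
//     let fifo_space: u16 = periph.diep0txfsts.read().spcavail().bits();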
ShellHeader.qunit.js | // Copyright (c) 2009-2017 SAP SE, All Rights Reserved
/**
* @fileOverview QUnit tests for sap.ushell.components.HeaderManager
*/
sap.ui.require([
"jquery.sap.global",
"sap/ui/core/IconPool",
"sap/ushell/ui/ShellHeader",
"sap/ushell/ui/shell/ShellAppTitle",
"sap/ushell/ui/shell/ShellHeadItem"
], function (jQuery, IconPool, ShellHeader, ShellAppTitle, ShellHeadItem) {
"use strict";
/* global QUnit, sinon */
// for the AppTitle control
sap.ushell.Container = {
getService: function () {
return {
isEnabled: function () {
return true;
}
};
}
};
var oShellHeader;
QUnit.module("basic test", {
beforeEach: function (assert) {
var done = assert.async();
var delegate = {
onAfterRendering: function () {
oShellHeader.removeDelegate(delegate);
done();
}
};
if (oShellHeader) {
oShellHeader.destroy();
}
oShellHeader = new ShellHeader("shell-header", {
logo: jQuery.sap.getModulePath("sap.ushell") + '/themes/base/img/sap_55x27.png',
showLogo: true,
visible: true,
headItems: [
new ShellHeadItem("backBtn", {icon: IconPool.getIconURI("back"), ariaLabel: "Back"})
],
headEndItems: [
new ShellHeadItem("sf", {icon: IconPool.getIconURI("search"), ariaLabel: "Search"})
],
title: "Subtitle with a long text",
appTitle: new ShellAppTitle("shellAppTitle", { text: "AppTitle with a long text" }),
search: new sap.m.Input()
});
oShellHeader.createUIArea("canvas");
oShellHeader.addDelegate(delegate);
},
afterEach: {
}
});
QUnit.test("Logo linked if not on homepage, navigate home", function (assert) {
var done = assert.async();
var oDelegate = {
onAfterRendering: function () {
oShellHeader.removeDelegate(oDelegate);
assert.equal(jQuery(".sapUshellShellIco").attr("href"), "#Shell-home", "Logo is linked");
assert.equal(jQuery(".sapUshellShellIco").attr("tabindex"), 0, "Tabindex is set correct for logo");
assert.equal(jQuery(".sapUshellShellIco").attr("aria-label"), "Home", "Aria-label is set correct for logo");
// Navigate home
var oLogo = oShellHeader.$("logo")[0];
oShellHeader.onsapspace({
target: oLogo
});
assert.strictEqual(oLogo.href, window.location.href, "Navigate home by space on the logo");
done();
}
};
if (oShellHeader) {
oShellHeader.destroy();
}
window.hasher = { getHash: sinon.stub().returns('aaa-bbb-ccc') };
oShellHeader = new ShellHeader("shell-header", {
homeUri: "#Shell-home"
});
oShellHeader.createUIArea("canvas");
oShellHeader.addDelegate(oDelegate);
});
QUnit.test("Logo not linked on homepage", function (assert) {
var done = assert.async();
var oDelegate = {
onAfterRendering: function () {
assert.notOk(jQuery(".sapUshellShellIco").attr("tabindex"), "tabindex is not set");
assert.notOk(jQuery(".sapUshellShellIco").attr("title"), "title is not set");
oShellHeader.removeDelegate(oDelegate);
done();
}
};
if (oShellHeader) {
oShellHeader.destroy();
}
window.hasher = { getHash: sinon.stub().returns("Shell-home") };
oShellHeader = new ShellHeader("shell-header", {
visible: true,
homeUri: "#Shell-home"
});
oShellHeader.createUIArea("canvas");
oShellHeader.addDelegate(oDelegate);
});
QUnit.test("Rendering", function (assert) {
assert.ok(oShellHeader.getId() === "shell-header", "Shell Header is rendered");
assert.ok(jQuery("#shellAppTitle .sapUshellHeadTitle").text() === oShellHeader.getAppTitle().getText(), "Apptitle is rendered");
assert.ok(jQuery(".sapUshellShellHeadSubtitle .sapUshellHeadTitle").text() === oShellHeader.getTitle(), "Title is rendered");
assert.ok(jQuery(".sapUshellShellIco").length === 1, "Logo is rendered");
assert.ok(jQuery(".sapUshellShellIco").attr("id") === "shell-header-logo", "Logo has an ID");
assert.ok(jQuery("#sf").length === 1, "Search button is rendered");
});
QUnit.test("Test that accessibility property is set correctly", function (assert) {
var aHeadItems = oShellHeader.getHeadItems(),
aHeadEndItems = oShellHeader.getHeadEndItems();
function | (oItem) {
if (!oItem.getDomRef()) {
return;
}
var jQueryItem = jQuery(oItem.getDomRef()),
sId = oItem.getId();
assert.equal(jQueryItem.attr("tabindex"), 0, "tabindex is set correctly for ShellHeaderItem: " + sId);
assert.equal(jQueryItem.attr("role"), "button", "role is set correctly for ShellHeaderItem: " + sId);
assert.ok(!!jQueryItem.attr("aria-label"), "aria-label is not empty for ShellHeaderItem: " + sId);
}
aHeadItems.forEach(assertShellHeaderItem);
aHeadEndItems.forEach(assertShellHeaderItem);
});
QUnit.test("_handleFocus:", function (assert) {
[
{
sTestDescription: "navigation from outside and navigation direction forward, no HeadItems",
bFromOutside: true,
bForwardNavigation: true,
bExpectedFocusOnShell: true,
bExpectedFocusOnShellHeadItem: false,
bExpectedFocusOnAppTitle: true,
bExpectedFocusOnShellHeadEndItem: false,
bExpectedHandleEventUsingExternalKeysHandlerCalled: false
},
{
sTestDescription: "navigation from outside and navigation direction forward, with HeadItems",
bFromOutside: true,
bForwardNavigation: true,
bShellHeadItems: true,
bExpectedFocusOnShell: true,
bExpectedFocusOnShellHeadItem: true,
bExpectedFocusOnAppTitle: false,
bExpectedFocusOnShellHeadEndItem: false,
bExpectedHandleEventUsingExternalKeysHandlerCalled: false
},
{
sTestDescription: "navigation from outside and navigation direction backwards, no HeadEndItems",
bFromOutside: true,
bForwardNavigation: false,
bExpectedFocusOnShell: true,
bExpectedFocusOnShellHeadItem: false,
bExpectedFocusOnAppTitle: true,
bExpectedFocusOnShellHeadEndItem: false,
bExpectedHandleEventUsingExternalKeysHandlerCalled: false
},
{
sTestDescription: "navigation from outside and navigation direction backwards, with HeadEndItems",
bFromOutside: true,
bForwardNavigation: false,
bShellHeadEndItems: true,
bExpectedFocusOnShell: true,
bExpectedFocusOnShellHeadItem: false,
bExpectedFocusOnAppTitle: false,
bExpectedFocusOnShellHeadEndItem: true,
bExpectedHandleEventUsingExternalKeysHandlerCalled: false
},
{
sTestDescription: "navigation from inside and navigation direction backwards",
bFromOutside: false,
bForwardNavigation: false,
bExpectedFocusOnShell: false,
bExpectedFocusOnShellHeadItem: false,
bExpectedFocusOnAppTitle: false,
bExpectedFocusOnShellHeadEndItem: false,
bExpectedHandleEventUsingExternalKeysHandlerCalled: true
}
].forEach(function (oFixture) {
// Arrange
var bHandleEventUsingExternalKeysHandlerCalled = false;
var oAccessKeyHandler = {
fromOutside: oFixture.bFromOutside,
bForwardNavigation: oFixture.bForwardNavigation,
bFocusOnShell: true,
_handleEventUsingExternalKeysHandler: function () {
bHandleEventUsingExternalKeysHandlerCalled = true;
}
};
var oFocusResult = {
bShellHeadItem: false,
bAppTitle: false,
bShellHeadEndItem: false
};
var fnGetHeadItemsStub = sinon.stub(oShellHeader, "getHeadItems",
function () {
return oFixture.bShellHeadItems ? [{
focus: function () {
oFocusResult.bShellHeadItem = true;
}
}] : [];
}
),
fnGetAppTitleStub = sinon.stub(oShellHeader, "getAppTitle").returns({
focus: function () {
oFocusResult.bAppTitle = true;
}
}),
fnGetHeadEndItemsStub = sinon.stub(oShellHeader, "getHeadEndItems",
function () {
return oFixture.bShellHeadEndItems ? [{
focus: function () {
oFocusResult.bShellHeadEndItem = true;
}
}] : [];
}
);
oShellHeader.setAccessKeyHandler(oAccessKeyHandler);
// Act
oShellHeader._handleFocus();
// Assert
assert.strictEqual(
oAccessKeyHandler.bFocusOnShell,
oFixture.bExpectedFocusOnShell,
"Focus was (not) set on the shell when " + oFixture.sTestDescription);
assert.strictEqual(
oFocusResult.bShellHeadItem,
oFixture.bExpectedFocusOnShellHeadItem,
"Focus was (not) set on the first shellHeadItem when " + oFixture.sTestDescription);
assert.strictEqual(
oFocusResult.bAppTitle,
oFixture.bExpectedFocusOnAppTitle,
"Focus was (not) set on the appTitle when " + oFixture.sTestDescription);
assert.strictEqual(
oFocusResult.bShellHeadEndItem,
oFixture.bExpectedFocusOnShellHeadEndItem,
"Focus was (not) set on the last shellHeadEndItem when " + oFixture.sTestDescription);
assert.strictEqual(
bHandleEventUsingExternalKeysHandlerCalled,
oFixture.bExpectedHandleEventUsingExternalKeysHandlerCalled,
"_handleEventUsingExternalKeysHandler was (not) called when " + oFixture.sTestDescription);
fnGetAppTitleStub.restore();
fnGetHeadItemsStub.restore();
fnGetHeadEndItemsStub.restore();
});
});
QUnit.test("Search State", function (assert) {
var done = assert.async();
var _afterOpen = {
onAfterRendering: function () { // after search open
oShellHeader.removeDelegate(_afterOpen);
var searchContainer = jQuery("#shell-header-hdr-search");
var maxWidth = searchContainer[0].style.maxWidth;
assert.strictEqual(maxWidth, "10rem", "Search field width is correctly set");
assert.strictEqual(searchContainer.width() > 0, true, "Search Field container is visible");
// close search
oShellHeader.setSearchState("COL", 10, true);
oShellHeader.addDelegate(_afterClose);
}
};
var _afterClose = {
onAfterRendering: function () { // after search close
oShellHeader.removeDelegate(_afterClose);
var searchContainer = jQuery("#shell-header-hdr-search");
var maxWidth = searchContainer[0].style.maxWidth;
assert.strictEqual(maxWidth, "0rem", "Search field width is correctly set");
assert.strictEqual(searchContainer.width(), 0, "Search Field container is invisible");
done();
}
};
// open search
oShellHeader.setSearchState("EXP", 10, true);
oShellHeader.addDelegate(_afterOpen);
});
});
| assertShellHeaderItem |
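// A hedged distillation of the self-detaching render delegate used in the tests
// above; oControl stands in for any rendered control instance.
var oOnceDelegate = {
    onAfterRendering: function () {
        oControl.removeDelegate(oOnceDelegate); // run assertions exactly once
        // ...assert against the rendered DOM, then call done() in async tests...
    }
};
oControl.addDelegate(oOnceDelegate);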
port.service.spec.ts | import { TestBed } from '@angular/core/testing';
import { PortService } from './port.service';
| it('should be created', () => {
const service: PortService = TestBed.get(PortService);
expect(service).toBeTruthy();
});
}); | describe('PortService', () => {
beforeEach(() => TestBed.configureTestingModule({}));
|
bot_base.py | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import collections
import collections.abc
import inspect
import sys
import traceback
from typing import Any, Callable, List, TYPE_CHECKING, Optional, TypeVar, Type, Union
import disnake
from .core import GroupMixin
from .view import StringView
from .context import Context
from . import errors
from .help import HelpCommand, DefaultHelpCommand
from .common_bot_base import CommonBotBase
if TYPE_CHECKING:
from typing_extensions import ParamSpec
from disnake.message import Message
from disnake.interactions import ApplicationCommandInteraction
from ._types import (
Check,
CoroFunc,
)
ApplicationCommandInteractionT = TypeVar(
"ApplicationCommandInteractionT", bound=ApplicationCommandInteraction, covariant=True
)
AnyMessageCommandInter = Any # Union[ApplicationCommandInteraction, UserCommandInteraction]
AnyUserCommandInter = Any # Union[ApplicationCommandInteraction, UserCommandInteraction]
P = ParamSpec("P")
__all__ = (
"when_mentioned",
"when_mentioned_or",
"BotBase",
)
MISSING: Any = disnake.utils.MISSING
T = TypeVar("T")
CFT = TypeVar("CFT", bound="CoroFunc")
CXT = TypeVar("CXT", bound="Context")
def when_mentioned(bot: BotBase, msg: Message) -> List[str]:
"""A callable that implements a command prefix equivalent to being mentioned.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
"""
# bot.user will never be None when this is called
return [f"<@{bot.user.id}> ", f"<@!{bot.user.id}> "] # type: ignore
def when_mentioned_or(*prefixes: str) -> Callable[[BotBase, Message], List[str]]:
"""A callable that implements when mentioned or other prefixes provided.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
Example
--------
.. code-block:: python3
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
.. note::
This callable returns another callable, so if this is done inside a custom
callable, you must call the returned callable, for example:
.. code-block:: python3
async def get_prefix(bot, message):
extras = await prefixes_for(message.guild) # returns a list
return commands.when_mentioned_or(*extras)(bot, message)
See Also
----------
:func:`.when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r = when_mentioned(bot, msg) + r
return r
return inner
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self):
return "<default-help-command>"
_default: Any = _DefaultRepr()
class BotBase(CommonBotBase, GroupMixin):
def __init__(
self,
command_prefix: Optional[Union[str, List[str], Callable]] = None,
help_command: HelpCommand = _default,
description: Optional[str] = None,
**options: Any,
):
super().__init__(**options)
self.command_prefix = command_prefix
self._checks: List[Check] = []
self._check_once = []
self._before_invoke = None
self._after_invoke = None
self._help_command = None
self.description: str = inspect.cleandoc(description) if description else ""
self.strip_after_prefix: bool = options.get("strip_after_prefix", False)
if help_command is _default:
self.help_command = DefaultHelpCommand()
else:
self.help_command = help_command
# internal helpers
async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:
"""|coro|
The default command error handler provided by the bot.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get("on_command_error", None):
return
command = context.command
if command and command.has_error_handler():
return
cog = context.cog
if cog and cog.has_error_handler():
return
print(f"Ignoring exception in command {context.command}:", file=sys.stderr)
traceback.print_exception(
type(exception), exception, exception.__traceback__, file=sys.stderr
)
# global check registration
def add_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`,
:meth:`.check_once`, :meth:`.slash_command_check` and etc.
If none of bool params are specified, the check is for
text commands only.
Parameters
-----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
If none of bool params are specified, the check is for
text commands only.
Parameters
-----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass
def | (self, func: T) -> T:
r"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`.check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check
def check_commands(ctx):
return ctx.command.qualified_name in allowed_commands
"""
# T was used instead of Check to ensure the type matches on return
self.add_check(func) # type: ignore
return func
def check_once(self, func: CFT) -> CFT:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
When using this function the :class:`.Context` sent to a group subcommand
may only parse the parent command and not the subcommands due to it
being invoked once per :meth:`.Bot.invoke` call.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
# type-checker doesn't distinguish between functions and methods
return await disnake.utils.async_all(f(ctx) for f in data) # type: ignore
def before_invoke(self, coro: CFT) -> CFT:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
def after_invoke(self, coro: CFT) -> CFT:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
# extensions
def _remove_module_references(self, name: str) -> None:
super()._remove_module_references(name)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# help command stuff
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError("help_command must be a subclass of HelpCommand")
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
# command processing
async def get_prefix(self, message: Message) -> Optional[Union[List[str], str]]:
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`disnake.Message`
The message context to get the prefix of.
Returns
--------
Optional[Union[List[:class:`str`], :class:`str`]]
A list of prefixes or a single prefix that the bot is
listening for. None if the bot isn't listening for prefixes.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await disnake.utils.maybe_coroutine(prefix, self, message)
if ret is None:
return None
if not isinstance(ret, str):
try:
ret = list(ret) # type: ignore
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError(
"command_prefix must be plain string, iterable of strings, or callable "
f"returning either of these, not {ret.__class__.__name__}"
)
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret
async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:
r"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`.process_commands`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
-----------
message: :class:`disnake.Message`
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
--------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
"""
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if message.author.id == self.user.id: # type: ignore
return ctx
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if prefix is None:
return ctx
elif isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
# if the context class' __init__ consumes something from the view this
# will be wrong. That seems unreasonable though.
if message.content.startswith(tuple(prefix)):
invoked_prefix = disnake.utils.find(view.skip_string, prefix)
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError(
"get_prefix must return either a string or a list of string, "
f"not {prefix.__class__.__name__}"
)
# It's possible a bad command_prefix got us here.
for value in prefix:
if not isinstance(value, str):
raise TypeError(
"Iterable command_prefix or list returned from get_prefix must "
f"contain only strings, not {value.__class__.__name__}"
)
# Getting here shouldn't happen
raise
if self.strip_after_prefix:
view.skip_ws()
invoker = view.get_word()
ctx.invoked_with = invoker
# type-checker fails to narrow invoked_prefix type.
ctx.prefix = invoked_prefix # type: ignore
ctx.command = self.all_commands.get(invoker)
return ctx
async def invoke(self, ctx: Context) -> None:
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch("command", ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise errors.CheckFailure("The global check once functions failed.")
except errors.CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch("command_completion", ctx)
elif ctx.invoked_with:
exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
self.dispatch("command_error", ctx, exc)
async def process_commands(self, message: Message) -> None:
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`.on_message`
event. If you choose to override the :func:`.on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.
This also checks if the message's author is a bot and doesn't
call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.
Parameters
-----------
message: :class:`disnake.Message`
The message to process commands for.
"""
if message.author.bot:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
async def on_message(self, message):
await self.process_commands(message)
| check |
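# A hedged sketch of a dynamic command_prefix callable following the
# get_prefix/when_mentioned_or contract documented above; the DM check and the
# "!" default are illustrative.
async def get_prefix(bot, message):
    if message.guild is None:
        return "!"  # a plain string prefix is fine in DMs
    return when_mentioned_or("!")(bot, message)  # mention or "!" in guilds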
message.py | from pyrogram import filters
from pyrogram.handlers import MessageHandler
from helpers import is_youtube
from ytdl import download
import player
from config import LOG_GROUP
async def | (client, message):
if message.text.startswith("/"):
return
if not is_youtube(message.text):
await message.reply_text("This is not a valid YouTube link.")
return
if "list=" in message.text:
await message.reply_text("Send me a video link, not a playlist link.")
return
await message.reply_text("Download scheduled.", quote=True)
download(
(
message.reply_text,
("Downloading...",)
),
(
message.reply_text,
(f"Downloaded and scheduled to play at position {player.q.qsize() + 1}.",)
),
[
player.play,
[
None,
(
message.reply_text,
("Playing...",)
),
(
message.reply_text,
("Finished playing...",)
),
None,
None,
message.from_user.id,
message.from_user.first_name,
[
client.send_message,
[
LOG_GROUP,
"<b>NOW PLAYING</b>\n"
"Title: <a href=\"{}\">{}</a>\n"
"Requested By: <a href=\"tg://user?id={}\">{}</a>"
]
] if LOG_GROUP else None
]
],
message.text,
)
__handlers__ = [
[
MessageHandler(
message,
filters.text
& filters.private
),
2
]
]
| message |
mysqlrestore.go | package cmd
import (
"context"
"github.com/spf13/cobra"
"github.com/mittwald/brudi/pkg/source"
"github.com/mittwald/brudi/pkg/source/mysqlrestore"
)
var (
mysqlRestoreCmd = &cobra.Command{
Use: "mysqlrestore",
Short: "restores from mysqldump",
Long: "Restores a given database server with given arguments",
Run: func(cmd *cobra.Command, args []string) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
err := source.DoRestoreForKind(ctx, mysqlrestore.Kind, cleanup, useRestic, useResticForget)
if err != nil |
},
}
)
func init() {
rootCmd.AddCommand(mysqlRestoreCmd)
}
| {
panic(err)
} |
deleteExchanges.js | const debug = require('debug')('rascal:tasks:deleteExchanges');
const _ = require('lodash');
const async = require('async');
module.exports = _.curry((config, ctx, next) => {
async.eachSeries(
_.keys(config.exchanges),
(name, callback) => {
deleteExchange(ctx.channel, config.exchanges[name], callback);
},
(err) => {
next(err, config, ctx);
}
);
});
function | (channel, config, next) {
if (config.fullyQualifiedName === '') return next();
debug('Deleting exchange: %s', config.fullyQualifiedName);
channel.deleteExchange(config.fullyQualifiedName, {}, next);
}
| deleteExchange |
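// Because the export above is curried, callers may pre-bind config and supply
// (ctx, next) later; a hedged sketch with illustrative names:
// const step = require('./deleteExchanges')(config);
// step(ctx, (err, config, ctx) => { /* schedule the next task */ });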
parse_scheme_table.py | #!/usr/bin/env python
# Usage: ./parse_scheme_table.py filename1 [filename2 filename3 ...]
# Input: fortran filenames with doxygen-compliant and CCPP-compliant physics schemes; the argument tables should have the following format:
# !! \section arg_table_schemename_run
# !! | local_name | standard_name | long_name | units | rank | type | kind | intent | optional |
# !! |----------------|-------------------------------------------------------|------------------------------------|---------|------|---------|-----------|--------|----------|
# !! | im | horizontal_loop_extent | horizontal loop extent, start at 1 | index | 0 | integer | | in | F |
# !! | ix | horizontal_dimension | horizontal dimension | index | 0 | integer | | in | F |
# !! | ... | ... | | | | | | | |
# !!
# Notes on the input format:
# - the "\section arg_table_SubroutineName" command denotes the start of the table; SubroutineName must match the name of the subroutine that the argument table describes
# - each line of the table must begin with the doxygen-delimiter '!!'
# - table headers are the first row; right now, the only ones parsed into XML (only ones required) are 'local var name' => id, 'longname' => name, units => units, rank => rank, type => type
# - the second row must have the |---|-----| format
# - after the last row of the table, there should be a blank doxygen line (only '!!') to denote the end of the table
# Output: for each filename specified, this routine converts the argument tables for all subroutines (*_init, *_run, *_finalize) into an XML file suitable to be used with mkcap.py (which generates the fortran code for the scheme cap)
# - the script generates a separate file for each module within the given files
import argparse #needed for command line argument filenames
from xml.etree import ElementTree as ET #needed for writing out XML
#subroutine for writing "pretty" XML; copied from http://effbot.org/zone/element-lib.htm#prettyprint
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#set up the command line argument parser
parser = argparse.ArgumentParser()
#the only arguments are a list of filenames to parse
parser.add_argument('file', help='paths to fortran source code to parse for generating CCPP scheme XML files', nargs='*')
args = parser.parse_args()
def parse_scheme_tables(files):
xmlfiles = []
#for each filename provided, parse it and output one XML file
for i in range(len(files)):
filename = files[i]
#read all lines of the file at once
with (open(filename, 'r')) as file:
file_lines = file.readlines()
#find all modules within the file, and save the start and end lines
module_names = []
mod_begin_lines = []
mod_end_lines = []
line_counter = 0
for line in file_lines:
words = line.split()
for j in range(len(words)):
#check for the word 'module', that it is the first word in the line, and that a module name exists afterward
if words[j].lower() == 'module' and j+1 <= len(words)-1 and j == 0:
mod_begin_lines.append(line_counter)
module_names.append(words[j+1].lower().strip())
if line.lower().find('end module') >= 0:
mod_end_lines.append(line_counter)
line_counter += 1
#for each module within the file, create a separate XML file for the "scheme"
for l in range(len(module_names)):
#find the *_init, *_run, *_finalize, etc. subroutines, save their location within the file and their names
line_counter = 0
sub_lines = []
sub_names = []
scheme_names = []
#only look at the lines in the current module
for line in file_lines[mod_begin_lines[l]:mod_end_lines[l]]:
words = line.split()
for j in range(len(words)):
#check for the word 'subroutine', that it is the first word in the line, and that a subroutine name exists afterward
if words[j].lower() == 'subroutine' and j+1 <= len(words)-1 and j == 0:
#consider the last substring separated by a '_' of the subroutine name as a 'postfix'
sub_name = words[j+1].split('(')[0].strip()
if sub_name.find('_') >= 0:
#ignore subroutines that have no postfix
if sub_name.find('init') >= 0 or sub_name.find('run') >= 0 or sub_name.find('finalize') >= 0:
#ignore subroutines that have postfixes other than init, run, finalize
sub_lines.append(line_counter)
#DH* case sensitive? sub_names.append(sub_name)
sub_names.append(sub_name.lower())
scheme_names.append(sub_names[-1][0:sub_names[-1].rfind('_')])
line_counter += 1
#check that all the subroutine "root" names in the current module are the same
if scheme_names.count(scheme_names[0]) == len(scheme_names):
scheme_name = scheme_names[0]
else:
message = 'Please check that all of the subroutines have the same root name:\n'
message += ' i.e. scheme_A_init, scheme_A_run, scheme_A_finalize\n'
message += 'Here is a list of the subroutine names:\n'
message += str(sub_names) + '\n'
message += 'Here is a list of the scheme names (parsed from the subroutine names):\n'
message += str(scheme_names)
raise Exception(message)
table_header_sets = []
var_data_sets = []
for j in range(len(sub_names)):
#find the argument table corresponding to each subroutine by searching
#"upward" from the subroutine definition line for the "arg_table_SubroutineName" section
table_found = False
for k in range(mod_begin_lines[l] + sub_lines[j], -1, -1):
line = file_lines[k]
words = line.split()
for word in words:
if 'arg_table_' + sub_names[j] in word.lower():
# DH* case sensitive? if 'arg_table_' + sub_names[j] in word:
table_found = True
header_line = k + 1
break
if table_found:
break
#if an argument table is found, parse it
if table_found:
#separate the table headers
table_headers = file_lines[header_line].split('|')
#check for blank table
if(len(table_headers) > 1):
table_headers = table_headers[1:-1]
table_header_sets.append([x.strip() for x in table_headers])
#get all of the variable information
end_of_table = False
k = header_line + 2
var_data = []
while not end_of_table:
line = file_lines[k]
words = line.split()
if len(words) == 1:
end_of_table = True
else:
var_items = line.split('|')[1:-1]
var_items = [x.strip() for x in var_items]
var_data.append(var_items)
k += 1
var_data_sets.append(var_data)
else:
table_header_sets.append([])
var_data_sets.append([])
else:
#if no table is found, just append an empty list
table_header_sets.append([])
var_data_sets.append([])
#write out the XML in the format that mkcap.py wants
top = ET.Element('scheme')
top.set('module', scheme_name)
for j in range(len(sub_names)):
sub_sub = ET.SubElement(top, 'subroutine')
sub_sub.set('name', sub_names[j])
#right now, the mapping from the tables to the XML is
# 'local var name' => id, 'longname' => name,
# units => units, rank => rank, type => type,
# description => description, kind => kind,
# intent => intent, optional => optional
#this can be generalized and updated in the future using the table header information ####
if len(var_data_sets[j]) > 0:
for k in range(len(var_data_sets[j])):
sub_var = ET.SubElement(sub_sub, 'var')
var_name = ET.SubElement(sub_var, 'name')
try:
var_name.text = var_data_sets[j][k][1]
except IndexError:
raise IndexError('This can be caused by the argument table missing an empty (!!) line in {0}'.format(filename))
var_units = ET.SubElement(sub_var, 'units')
var_units.text = var_data_sets[j][k][3]
var_id = ET.SubElement(sub_var, 'id')
var_id.text = var_data_sets[j][k][0]
var_rank = ET.SubElement(sub_var, 'rank')
var_rank.text = var_data_sets[j][k][4]
var_type = ET.SubElement(sub_var, 'type')
var_type.text = var_data_sets[j][k][5]
var_description = ET.SubElement(sub_var, 'description')
var_description.text = var_data_sets[j][k][2]
var_kind = ET.SubElement(sub_var, 'kind')
var_kind.text = var_data_sets[j][k][6]
var_intent = ET.SubElement(sub_var, 'intent')
var_intent.text = var_data_sets[j][k][7]
var_optional = ET.SubElement(sub_var, 'optional')
var_optional.text = var_data_sets[j][k][8]
indent(top)
tree = ET.ElementTree(top)
xmlfile = scheme_name + '.xml'
tree.write(xmlfile, xml_declaration=True, encoding='utf-8', method="xml")
print('Parsed tables for ' + ", ".join([str(x) for x in sub_names]) + ' in module '
+ module_names[l] + '; output => ' + xmlfile)
xmlfiles.append(xmlfile)
return xmlfiles
def main():
#set up the command line argument parser
|
if __name__ == '__main__':
main()
| parser = argparse.ArgumentParser()
#the only arguments are a list of filenames to parse
parser.add_argument('file', help='paths to fortran source code to parse for generating CCPP scheme XML files', nargs='*')
args = parser.parse_args()
#parse scheme tables in all files
parse_scheme_tables(args.file) |
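# Hedged usage note (filenames illustrative):
#   ./parse_scheme_table.py scheme_A.f90 scheme_B.f90
# writes one <scheme_name>.xml per module found in the inputs.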
triviaUtils.py | import locale
import random
from typing import List
import CynanBotCommon.utils as utils
from CynanBotCommon.cuteness.cutenessResult import CutenessResult
from CynanBotCommon.trivia.absTriviaQuestion import AbsTriviaQuestion
from CynanBotCommon.trivia.triviaScoreResult import TriviaScoreResult
from CynanBotCommon.trivia.triviaType import TriviaType
class TriviaUtils():
def __init__(self):
pass
def getCorrectAnswerReveal(
self,
question: AbsTriviaQuestion,
newCuteness: CutenessResult,
userNameThatRedeemed: str,
delimiter: str = '; '
) -> str:
if question is None:
raise ValueError(f'question argument is malformed: \"{question}\"')
elif newCuteness is None:
raise ValueError(f'newCuteness argument is malformed: \"{newCuteness}\"')
elif not utils.isValidStr(userNameThatRedeemed):
raise ValueError(f'userNameThatRedeemed argument is malformed: \"{userNameThatRedeemed}\"')
elif delimiter is None:
raise ValueError(f'delimiter argument is malformed: \"{delimiter}\"')
prefix = f'{self.getRandomTriviaEmote()} Congratulations @{userNameThatRedeemed}, that\'s correct!'
infix = f'Your new cuteness is {newCuteness.getCutenessStr()}.'
correctAnswers = question.getCorrectAnswers()
if len(correctAnswers) == 1:
return f'{prefix} 🎉 {infix} ✨ The correct answer was: {correctAnswers[0]}'
else:
correctAnswersStr = delimiter.join(correctAnswers)
return f'{prefix} 🎉 {infix} ✨ The correct answers were: {correctAnswersStr}'
def getIncorrectAnswerReveal(
self,
question: AbsTriviaQuestion,
userNameThatRedeemed: str,
delimiter: str = '; '
) -> str:
if question is None:
raise ValueError(f'question argument is malformed: \"{question}\"')
elif not utils.isValidStr(userNameThatRedeemed):
raise ValueError(f'userNameThatRedeemed argument is malformed: \"{userNameThatRedeemed}\"')
elif delimiter is None:
raise ValueError(f'delimiter argument is malformed: \"{delimiter}\"')
prefix = f'{self.getRandomTriviaEmote()} Sorry @{userNameThatRedeemed}, that\'s incorrect. {utils.getRandomSadEmoji()}'
correctAnswers = question.getCorrectAnswers()
if len(correctAnswers) == 1:
return f'{prefix} The correct answer is: {correctAnswers[0]}'
else:
correctAnswersStr = delimiter.join(correctAnswers)
return f'{prefix} The correct answers are: {correctAnswersStr}'
def getOutOfTimeAnswerReveal(
self,
question: AbsTriviaQuestion,
userNameThatRedeemed: str,
delimiter: str = '; '
) -> str:
if question is None:
raise ValueError(f'question argument is malformed: \"{question}\"')
elif not utils.isValidStr(userNameThatRedeemed):
raise ValueError(f'userNameThatRedeemed argument is malformed: \"{userNameThatRedeemed}\"')
elif delimiter is None:
raise ValueError(f'delimiter argument is malformed: \"{delimiter}\"')
prefix = f'{self.getRandomTriviaEmote()} Sorry @{userNameThatRedeemed}, you\'re out of time. {utils.getRandomSadEmoji()}'
correctAnswers = question.getCorrectAnswers()
if len(correctAnswers) == 1:
return f'{prefix} The correct answer is: {correctAnswers[0]}'
else:
correctAnswersStr = delimiter.join(correctAnswers)
return f'{prefix} The correct answers are: {correctAnswersStr}'
def getRandomTriviaEmote(self) -> str:
triviaEmotes: List[str] = [ '🏫', '🖍️', '✏️', '🧑🎓', '👨🎓', '👩🎓', '🧑🏫', '👨🏫', '👩🏫' ]
return random.choice(triviaEmotes)
def getResults(self, userName: str, triviaResult: TriviaScoreResult) -> str:
if not utils.isValidStr(userName):
raise ValueError(f'userName argument is malformed: \"{userName}\"')
elif triviaResult is None:
raise ValueError(f'triviaResult argument is malformed: \"{triviaResult}\"')
if triviaResult.getTotal() <= 0:
return f'@{userName} has not played any trivia games 😿'
gamesStr: str = 'games'
if triviaResult.getTotal() == 1:
gamesStr = 'game'
winsStr: str = 'wins'
if triviaResult.getTotalWins() == 1:
winsStr = 'win'
superTriviaWinsStr: str = ''
if triviaResult.getSuperTriviaWins() > 1:
superTriviaWinsStr = f' ({triviaResult.getSuperTriviaWinsStr()} of which are super trivia wins)'
elif triviaResult.getSuperTriviaWins() == 1:
superTriviaWinsStr = f' ({triviaResult.getSuperTriviaWinsStr()} of which is a super trivia win)'
lossesStr: str = 'losses'
if triviaResult.getTotalLosses() == 1:
lossesStr = 'loss'
ratioStr: str = f' ({triviaResult.getWinPercentStr()} wins)'
streakStr: str = ''
if triviaResult.getStreak() >= 3:
streakStr = f'… and is on a {triviaResult.getAbsStreakStr()} game winning streak 😸'
elif triviaResult.getStreak() <= -3:
streakStr = f'… and is on a {triviaResult.getAbsStreakStr()} game losing streak 🙀'
return f'@{userName} has played {triviaResult.getTotalStr()} trivia {gamesStr}, with {triviaResult.getTotalWinsStr()} {winsStr}{superTriviaWinsStr} and {triviaResult.getTotalLossesStr()} {lossesStr}{ratioStr}{streakStr}'
def getSuperTriviaCorrectAnswerReveal(
self,
question: AbsTriviaQuestion,
newCuteness: CutenessResult,
multiplier: int,
points: int,
userName: str,
delimiter: str = '; '
) -> str:
if question is None:
raise ValueError(f'question argument is malformed: \"{question}\"')
elif newCuteness is None:
raise ValueError(f'newCuteness argument is malformed: \"{newCuteness}\"')
elif not utils.isValidNum(multiplier):
raise ValueError(f'multiplier argument is malformed: \"{multiplier}\"')
elif not utils.isValidNum(points):
raise ValueError(f'points argument is malformed: \"{points}\"')
elif not utils.isValidStr(userName):
raise ValueError(f'userName argument is malformed: \"{userName}\"')
elif delimiter is None:
raise ValueError(f'delimiter argument is malformed: \"{delimiter}\"')
pointsStr = locale.format_string("%d", points, grouping = True)
multiplierStr = locale.format_string("%d", multiplier, grouping = True)
prefix = f'{self.getRandomTriviaEmote()} CONGRATULATIONS @{userName}, that\'s correct!'
infix = f'You earned {pointsStr} cuteness ({multiplierStr}x multiplier), so your new cuteness is {newCuteness.getCutenessStr()}.'
correctAnswers = question.getCorrectAnswers()
if len(correctAnswers) == 1:
return f'{prefix} 🎉 {infix} ✨ The correct answer was: {correctAnswers[0]}'
else:
correctAnswersStr = delimiter.join(correctAnswers)
return f'{prefix} 🎉 {infix} ✨ The correct answers were: {correctAnswersStr}'
def getSuperTriviaOutOfTimeAnswerReveal(
self,
question: AbsTriviaQuestion,
multiplier: int,
delimiter: str = '; '
) -> str:
if question is None:
raise ValueError(f'question argument is malformed: \"{question}\"')
elif not utils.isValidNum(multiplier):
raise ValueError(f'multiplier argument is malformed: \"{multiplier}\"')
elif delimiter is None:
raise ValueError(f'delimiter argument is malformed: \"{delimiter}\"')
multiplierStr = locale.format_string("%d", multiplier, grouping = True)
prefix = f'{self.getRandomTriviaEmote()} Sorry everyone, y\'all are out of time… {utils.getRandomSadEmoji()} Goodbye {multiplierStr}x multiplier 👋…'
correctAnswers = question.getCorrectAnswers()
if len(correctAnswers) == 1:
return f'{prefix} The correct answer is: {correctAnswers[0]}'
else:
correctAnswersStr = delimiter.join(correctAnswers)
return f'{prefix} The correct answers are: {correctAnswersStr}'
def getSuperTriviaQuestionPrompt(
self,
triviaQuestion: AbsTriviaQuestion,
delaySeconds: int,
points: int,
multiplier: int,
delimiter: str = ' '
) -> str:
if triviaQuestion is None:
raise ValueError(f'triviaQuestion argument is malformed: \"{triviaQuestion}\"')
elif not utils.isValidNum(delaySeconds):
raise ValueError(f'delaySeconds argument is malformed: \"{delaySeconds}\"')
elif delaySeconds < 1:
raise ValueError(f'delaySeconds argument is out of bounds: {delaySeconds}')
elif not utils.isValidNum(points):
raise ValueError(f'points argument is malformed: \"{points}\"')
elif points < 1:
raise ValueError(f'points argument is out of bounds: {points}')
elif not utils.isValidNum(multiplier):
raise ValueError(f'multiplier argument is malformed: \"{multiplier}\"')
elif multiplier < 1:
raise ValueError(f'multiplier argument is out of bounds: {multiplier}')
elif delimiter is None:
raise ValueError(f'delimiter argument is malformed: \"{delimiter}\"')
triviaEmote = self.getRandomTriviaEmote()
delaySecondsStr = locale.format_string("%d", delaySeconds, grouping = True)
pointsStr = locale.format_string("%d", points, grouping = True)
multiplierStr = locale.format_string("%d", multiplier, grouping = True)
questionPrompt: str = None
if triviaQuestion.getTriviaType() is TriviaType.QUESTION_ANSWER and triviaQuestion.hasCategory():
questionPrompt = f'— category is {triviaQuestion.getCategory()} — {triviaQuestion.getQuestion()}'
else:
questionPrompt = f'— {triviaQuestion.getPrompt(delimiter)}'
return f'{triviaEmote} EVERYONE can play! !superanswer in {delaySecondsStr}s for {pointsStr} points ({multiplierStr}x multiplier ✨) {questionPrompt}'
def getTriviaGameQuestionPrompt(
self,
triviaQuestion: AbsTriviaQuestion,
delaySeconds: int,
points: int,
userNameThatRedeemed: str,
delimiter: str = ' '
) -> str:
if triviaQuestion is None:
raise ValueError(f'triviaQuestion argument is malformed: \"{triviaQuestion}\"')
elif not utils.isValidNum(delaySeconds):
raise ValueError(f'delaySeconds argument is malformed: \"{delaySeconds}\"')
elif delaySeconds < 1:
raise ValueError(f'delaySeconds argument is out of bounds: {delaySeconds}')
elif not utils.isValidNum(points):
raise ValueError(f'points argument is malformed: \"{points}\"')
elif points < 1:
raise ValueError(f'points argument is out of bounds: {points}')
elif delimiter is None:
raise ValueError(f'delimiter argument is malformed: \"{delimiter}\"')
triviaEmote = self.getRandomTriviaEmote() | if points == 1:
pointsPlurality = 'point'
else:
pointsPlurality = 'points'
questionPrompt: str = None
if triviaQuestion.getTriviaType() is TriviaType.QUESTION_ANSWER and triviaQuestion.hasCategory():
questionPrompt = f'(category is \"{triviaQuestion.getCategory()}\") — {triviaQuestion.getQuestion()}'
else:
questionPrompt = f'— {triviaQuestion.getPrompt(delimiter)}'
return f'{triviaEmote} @{userNameThatRedeemed} !answer in {delaySecondsStr}s for {pointsStr} {pointsPlurality} {questionPrompt}' | delaySecondsStr = locale.format_string("%d", delaySeconds, grouping = True)
pointsStr = locale.format_string("%d", points, grouping = True)
pointsPlurality: str = None |
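# A hedged usage sketch for the prompt builders above; question construction is
# elided and the argument values are illustrative.
# utils = TriviaUtils()
# prompt = utils.getTriviaGameQuestionPrompt(
#     triviaQuestion=question, delaySeconds=30, points=5,
#     userNameThatRedeemed='viewer123')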
http.go | package kernel
import (
"context"
"flag"
"log"
"net/http"
"os"
"os/signal"
"time"
"github.com/gorilla/mux"
"github.com/lucidfy/lucid/pkg/facade/logger"
"github.com/lucidfy/lucid/pkg/facade/routes"
"github.com/lucidfy/lucid/pkg/facade/urls"
"github.com/lucidfy/lucid/registrar"
)
type App struct {
Server *http.Server
Deadline time.Duration
}
func | () *App {
var wait time.Duration
flag.DurationVar(&wait, "graceful-timeout", time.Second*15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
flag.Parse()
handler := routes.Mux().Register(registrar.Routes).(*mux.Router)
srv := &http.Server{
Addr: urls.GetAddr(),
// Good practice to set timeouts to avoid Slow-loris attacks.
WriteTimeout: time.Second * 15,
ReadTimeout: time.Second * 15,
IdleTimeout: time.Second * 60,
Handler: handler,
}
return &App{
Server: srv,
Deadline: wait,
}
}
func (h *App) Run() *App {
// Run our server in a goroutine so that it doesn't block.
go func() {
if err := h.Server.ListenAndServe(); err != nil {
logger.Println(err)
}
}()
c := make(chan os.Signal, 1)
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
signal.Notify(c, os.Interrupt)
// Block until we receive our signal.
<-c
return h
}
func (h *App) WithGracefulShutdown() *App {
// Create a deadline to wait for.
ctx, cancel := context.WithTimeout(context.Background(), h.Deadline)
defer cancel()
// Doesn't block if no connections, but will otherwise wait
// until the timeout deadline.
h.Server.Shutdown(ctx)
// Optionally, you could run srv.Shutdown in a goroutine and block on
// <-ctx.Done() if your application should wait for other services
// to finalize based on context cancellation.
log.Println("shutting down")
os.Exit(0)
return h
}
| Init |
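// Hedged usage sketch from a separate main package (import path illustrative);
// Init, Run and WithGracefulShutdown all return *App, so they chain:
//
//	func main() {
//	    kernel.Init().Run().WithGracefulShutdown()
//	}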
errors.generated.go | package http
import "github.com/v2fly/v2ray-core/v5/common/errors"
type errPathObjHolder struct{}
func newError(values ...interface{}) *errors.Error | {
return errors.New(values...).WithPathObj(errPathObjHolder{})
} |
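// Hedged usage note: the typical v2fly call pattern chains an underlying error
// via Base, e.g.
//
//	return newError("failed to parse response").Base(err)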
|
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
#[macro_use]
extern crate rocket;
#[macro_use]
extern crate rocket_okapi;
use rocket_contrib::json::Json;
use rocket_okapi::swagger_ui::*;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "camelCase")]
struct User {
user_id: u64,
username: String,
#[serde(default)]
email: Option<String>,
}
/// # Get all users
///
/// Returns all users in the system.
#[openapi]
#[get("/user")]
fn get_all_users() -> Json<Vec<User>> {
Json(vec![User {
user_id: 42,
username: "bob".to_owned(),
email: None,
}])
}
/// # Get user
///
/// Returns a single user by ID.
#[openapi]
#[get("/user/<id>")]
fn get_user(id: u64) -> Option<Json<User>> {
Some(Json(User {
user_id: id,
username: "bob".to_owned(),
email: None,
}))
}
/// # Create user
#[openapi]
#[post("/user", data = "<user>")]
fn create_user(user: Json<User>) -> Json<User> {
user
}
#[openapi(skip)]
#[get("/hidden")]
fn hidden() -> Json<&'static str> {
Json("Hidden from swagger!")
}
fn main() {
rocket::ignite()
.mount(
"/",
routes_with_openapi![get_all_users, get_user, create_user, hidden],
)
.mount(
"/swagger-ui/",
make_swagger_ui(&SwaggerUIConfig {
url: Some("../openapi.json".to_owned()),
urls: None,
}),
)
.launch();
}
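// Usage sketch (assumes Rocket's default port 8000): once the server is up,
// the generated spec and the UI should be reachable at
//   curl http://localhost:8000/openapi.json
//   http://localhost:8000/swagger-ui/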
development.js | 'use strict';
var gulp = require('gulp');
var runSequence = require('run-sequence');
gulp.task('dev', ['clean'], function(cb) {
cb = cb || function() {};
global.isProd = false;
runSequence(['styles', 'images', 'fonts', 'views', 'data', 'browserify'], 'watch', cb);
});
GGraph.ts |
namespace fgui {
    export class GGraph extends GObject {
public _content: cc.Graphics;
private _type: GraphType = 0;
private _lineSize: number = 0;
private _lineColor: cc.Color;
private _fillColor: cc.Color;
private _cornerRadius: Array<number>;
private _sides: number;
private _startAngle: number;
private _polygonPoints: any[];
private _distances: number[];
private _hasContent: boolean;
public constructor() {
super();
this._node.name = "GGraph";
this._lineSize = 1;
this._lineColor = cc.Color.BLACK;
this._fillColor = cc.Color.WHITE;
this._cornerRadius = null;
this._sides = 3;
this._startAngle = 0;
this._content = this._node.addComponent(cc.Graphics);
}
public drawRect(lineSize: number, lineColor: cc.Color, fillColor: cc.Color, corner?: Array<number>): void {
this._type = GraphType.Rect;
this._lineSize = lineSize;
this._lineColor = lineColor;
this._fillColor = fillColor;
this._cornerRadius = corner;
this.updateGraph();
}
public drawEllipse(lineSize: number, lineColor: cc.Color, fillColor: cc.Color): void {
this._type = GraphType.Ellipse;
this._lineSize = lineSize;
this._lineColor = lineColor;
this._fillColor = fillColor;
this._cornerRadius = null;
this.updateGraph();
}
public drawRegularPolygon(lineSize: number, lineColor: cc.Color, fillColor: cc.Color, sides: number, startAngle: number = 0, distances: number[] = null): void {
            this._type = GraphType.RegularPolygon;
this._lineSize = lineSize;
this._lineColor = lineColor;
this._fillColor = fillColor;
this._sides = sides;
this._startAngle = startAngle;
this._distances = distances;
this.updateGraph();
}
public drawPolygon(lineSize: number, lineColor: cc.Color, fillColor: cc.Color, points: any[]): void {
            this._type = GraphType.Polygon;
this._lineSize = lineSize;
this._lineColor = lineColor;
this._fillColor = fillColor;
this._polygonPoints = points;
this.updateGraph();
}
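        // Usage sketch (hypothetical GGraph instance named "graph"): a hexagon
        // via drawRegularPolygon, and a free-form triangle via drawPolygon,
        // whose points are a flat [x0, y0, x1, y1, ...] list:
        //   graph.drawRegularPolygon(1, cc.Color.BLACK, cc.Color.WHITE, 6);
        //   graph.drawPolygon(1, cc.Color.BLACK, cc.Color.WHITE, [0, 0, 50, 0, 25, 40]);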
public get distances(): number[] {
return this._distances;
}
public set distances(value: number[]) {
this._distances = value;
            if (this._type == GraphType.RegularPolygon)
this.updateGraph();
}
public clearGraphics(): void {
this._type = GraphType.PlaceHolder;
if (this._hasContent) {
this._content.clear();
this._hasContent = false;
}
}
public get type(): GraphType {
return this._type;
}
public get color(): cc.Color {
return this._fillColor;
}
public set color(value: cc.Color) {
this._fillColor = value;
if (this._type != 0)
this.updateGraph();
}
private updateGraph(): void {
let ctx = this._content;
if (this._hasContent) {
this._hasContent = false;
ctx.clear();
}
var w: number = this._width;
var h: number = this._height;
if (w == 0 || h == 0)
return;
var px: number = -this.pivotX * this._width;
var py: number = this.pivotY * this._height;
ctx.lineWidth = this._lineSize;
ctx.strokeColor = this._lineColor;
ctx.fillColor = this._fillColor;
if (this._type == 1) {
if (this._cornerRadius) {
ctx.roundRect(0 + px, -h + py, w, h, this._cornerRadius[0] * 2);
}
else
ctx.rect(0 + px, -h + py, w, h);
}
else if (this._type == 2) {
ctx.ellipse(w / 2 + px, -h / 2 + py, w / 2, h / 2);
}
else if (this._type == 3) {
this.drawPath(ctx, this._polygonPoints, px, py);
}
else if (this._type == 4) {
if (!this._polygonPoints)
this._polygonPoints = [];
var radius: number = Math.min(this._width, this._height) / 2;
this._polygonPoints.length = 0;
var angle: number = cc.misc.degreesToRadians(this._startAngle);
var deltaAngle: number = 2 * Math.PI / this._sides;
var dist: number;
for (var i: number = 0; i < this._sides; i++) {
if (this._distances) {
dist = this._distances[i];
if (isNaN(dist))
dist = 1;
}
else
dist = 1;
var xv: number = radius + radius * dist * Math.cos(angle);
var yv: number = radius + radius * dist * Math.sin(angle);
this._polygonPoints.push(xv, yv);
angle += deltaAngle;
}
this.drawPath(ctx, this._polygonPoints, px, py);
}
if (this._lineSize != 0)
ctx.stroke();
ctx.fill();
this._hasContent = true;
}
private drawPath(ctx: cc.Graphics, points: number[], px: number, py: number): void {
var cnt: number = points.length;
ctx.moveTo(points[0] + px, -points[1] + py);
for (var i: number = 2; i < cnt; i += 2)
ctx.lineTo(points[i] + px, -points[i + 1] + py);
ctx.lineTo(points[0] + px, -points[1] + py);
}
protected handleSizeChanged(): void {
super.handleSizeChanged();
if (this._type != 0)
this.updateGraph();
}
protected handleAnchorChanged(): void {
            super.handleAnchorChanged();
if (this._type != 0)
this.updateGraph();
}
public getProp(index: number): any {
if (index == ObjectPropID.Color)
return this.color;
else
return super.getProp(index);
}
public setProp(index: number, value: any): void {
if (index == ObjectPropID.Color)
this.color = value;
else
super.setProp(index, value);
}
public setup_beforeAdd(buffer: ByteBuffer, beginPos: number): void {
super.setup_beforeAdd(buffer, beginPos);
buffer.seek(beginPos, 5);
this._type = buffer.readByte();
if (this._type != 0) {
var i: number;
var cnt: number;
this._lineSize = buffer.readInt();
this._lineColor = buffer.readColor(true);
this._fillColor = buffer.readColor(true);
if (buffer.readBool()) {
this._cornerRadius = new Array<number>(4);
for (i = 0; i < 4; i++)
this._cornerRadius[i] = buffer.readFloat();
}
if (this._type == 3) {
cnt = buffer.readShort();
this._polygonPoints = [];
this._polygonPoints.length = cnt;
for (i = 0; i < cnt; i++)
this._polygonPoints[i] = buffer.readFloat();
}
else if (this._type == 4) {
this._sides = buffer.readShort();
this._startAngle = buffer.readFloat();
cnt = buffer.readShort();
if (cnt > 0) {
this._distances = [];
for (i = 0; i < cnt; i++)
this._distances[i] = buffer.readFloat();
}
}
this.updateGraph();
}
}
}
}
cloudtasks-gen.go | // Copyright 2022 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package cloudtasks provides access to the Cloud Tasks API.
//
// This package is DEPRECATED. Use package cloud.google.com/go/cloudtasks/apiv2beta2 instead.
//
// For product documentation, see: https://cloud.google.com/tasks/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/cloudtasks/v2"
// ...
// ctx := context.Background()
// cloudtasksService, err := cloudtasks.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// cloudtasksService, err := cloudtasks.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// cloudtasksService, err := cloudtasks.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package cloudtasks // import "google.golang.org/api/cloudtasks/v2"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
internaloption "google.golang.org/api/option/internaloption"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
const apiId = "cloudtasks:v2"
const apiName = "cloudtasks"
const apiVersion = "v2"
const basePath = "https://cloudtasks.googleapis.com/"
const mtlsBasePath = "https://cloudtasks.mtls.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// See, edit, configure, and delete your Google Cloud data and see the
// email address for your Google Account.
CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
// NewService creates a new Service.
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/cloud-platform",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new Service. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.Projects = NewProjectsService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Projects *ProjectsService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewProjectsService(s *Service) *ProjectsService {
rs := &ProjectsService{s: s}
rs.Locations = NewProjectsLocationsService(s)
return rs
}
type ProjectsService struct {
s *Service
Locations *ProjectsLocationsService
}
func NewProjectsLocationsService(s *Service) *ProjectsLocationsService {
	rs := &ProjectsLocationsService{s: s}
	rs.Queues = NewProjectsLocationsQueuesService(s)
	return rs
}
type ProjectsLocationsService struct {
s *Service
Queues *ProjectsLocationsQueuesService
}
func NewProjectsLocationsQueuesService(s *Service) *ProjectsLocationsQueuesService {
rs := &ProjectsLocationsQueuesService{s: s}
rs.Tasks = NewProjectsLocationsQueuesTasksService(s)
return rs
}
type ProjectsLocationsQueuesService struct {
s *Service
Tasks *ProjectsLocationsQueuesTasksService
}
func NewProjectsLocationsQueuesTasksService(s *Service) *ProjectsLocationsQueuesTasksService {
rs := &ProjectsLocationsQueuesTasksService{s: s}
return rs
}
type ProjectsLocationsQueuesTasksService struct {
s *Service
}
// AppEngineHttpRequest: App Engine HTTP request. The message defines
// the HTTP request that is sent to an App Engine app when the task is
// dispatched. Using AppEngineHttpRequest requires
// `appengine.applications.get`
// (https://cloud.google.com/appengine/docs/admin-api/access-control)
// Google IAM permission for the project and the following scope:
// `https://www.googleapis.com/auth/cloud-platform` The task will be
// delivered to the App Engine app which belongs to the same project as
// the queue. For more information, see How Requests are Routed
// (https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed)
// and how routing is affected by dispatch files
// (https://cloud.google.com/appengine/docs/python/config/dispatchref).
// Traffic is encrypted during transport and never leaves Google
// datacenters. Because this traffic is carried over a communication
// mechanism internal to Google, you cannot explicitly set the protocol
// (for example, HTTP or HTTPS). The request to the handler, however,
// will appear to have used the HTTP protocol. The AppEngineRouting used
// to construct the URL that the task is delivered to can be set at the
// queue-level or task-level: * If app_engine_routing_override is set on
// the queue, this value is used for all tasks in the queue, no matter
// what the setting is for the task-level app_engine_routing. The `url`
// that the task will be sent to is: * `url =` host `+` relative_uri
// Tasks can be dispatched to secure app handlers, unsecure app
// handlers, and URIs restricted with `login: admin`
// (https://cloud.google.com/appengine/docs/standard/python/config/appref).
// Because tasks are not run as any user, they cannot be dispatched to
// URIs restricted with `login: required`
// (https://cloud.google.com/appengine/docs/standard/python/config/appref)
// Task dispatches also do not follow redirects. The task attempt has
// succeeded if the app's request handler returns an HTTP response code
// in the range [`200` - `299`]. The task attempt has failed if the
// app's handler returns a non-2xx response code or Cloud Tasks does not
// receive response before the deadline. Failed tasks will be retried
// according to the retry configuration. `503` (Service Unavailable) is
// considered an App Engine system error instead of an application error
// and will cause Cloud Tasks' traffic congestion control to temporarily
// throttle the queue's dispatches. Unlike other types of task targets,
// a `429` (Too Many Requests) response from an app handler does not
// cause traffic congestion control to throttle the queue.
type AppEngineHttpRequest struct {
// AppEngineRouting: Task-level setting for App Engine routing. * If
// app_engine_routing_override is set on the queue, this value is used
// for all tasks in the queue, no matter what the setting is for the
// task-level app_engine_routing.
AppEngineRouting *AppEngineRouting `json:"appEngineRouting,omitempty"`
// Body: HTTP request body. A request body is allowed only if the HTTP
// method is POST or PUT. It is an error to set a body on a task with an
// incompatible HttpMethod.
Body string `json:"body,omitempty"`
// Headers: HTTP request headers. This map contains the header field
// names and values. Headers can be set when the task is created.
// Repeated headers are not supported but a header value can contain
// commas. Cloud Tasks sets some headers to default values: *
// `User-Agent`: By default, this header is "AppEngine-Google;
// (+http://code.google.com/appengine)". This header can be modified,
// but Cloud Tasks will append "AppEngine-Google;
// (+http://code.google.com/appengine)" to the modified `User-Agent`.
// If the task has a body, Cloud Tasks sets the following headers: *
// `Content-Type`: By default, the `Content-Type` header is set to
// "application/octet-stream". The default can be overridden by
// explicitly setting `Content-Type` to a particular media type when the
// task is created. For example, `Content-Type` can be set to
// "application/json". * `Content-Length`: This is computed by Cloud
// Tasks. This value is output only. It cannot be changed. The headers
// below cannot be set or overridden: * `Host` * `X-Google-*` *
// `X-AppEngine-*` In addition, Cloud Tasks sets some headers when the
// task is dispatched, such as headers containing information about the
// task; see request headers
// (https://cloud.google.com/tasks/docs/creating-appengine-handlers#reading_request_headers).
// These headers are set only when the task is dispatched, so they are
// not visible when the task is returned in a Cloud Tasks response.
// Although there is no specific limit for the maximum number of headers
// or the size, there is a limit on the maximum size of the Task. For
// more information, see the CreateTask documentation.
Headers map[string]string `json:"headers,omitempty"`
// HttpMethod: The HTTP method to use for the request. The default is
// POST. The app's request handler for the task's target URL must be
// able to handle HTTP requests with this http_method, otherwise the
// task attempt fails with error code 405 (Method Not Allowed). See
// Writing a push task request handler
// (https://cloud.google.com/appengine/docs/java/taskqueue/push/creating-handlers#writing_a_push_task_request_handler)
// and the App Engine documentation for your runtime on How Requests are
// Handled
// (https://cloud.google.com/appengine/docs/standard/python3/how-requests-are-handled).
//
// Possible values:
// "HTTP_METHOD_UNSPECIFIED" - HTTP method unspecified
// "POST" - HTTP POST
// "GET" - HTTP GET
// "HEAD" - HTTP HEAD
// "PUT" - HTTP PUT
// "DELETE" - HTTP DELETE
// "PATCH" - HTTP PATCH
// "OPTIONS" - HTTP OPTIONS
HttpMethod string `json:"httpMethod,omitempty"`
// RelativeUri: The relative URI. The relative URI must begin with "/"
// and must be a valid HTTP relative URI. It can contain a path and
// query string arguments. If the relative URI is empty, then the root
// path "/" will be used. No spaces are allowed, and the maximum length
// allowed is 2083 characters.
RelativeUri string `json:"relativeUri,omitempty"`
// ForceSendFields is a list of field names (e.g. "AppEngineRouting") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngineRouting") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AppEngineHttpRequest) MarshalJSON() ([]byte, error) {
type NoMethod AppEngineHttpRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
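// A hedged construction sketch (field values are placeholders; Body carries
// base64-encoded bytes per the proto JSON mapping, e.g. via encoding/base64):
//
//	req := &AppEngineHttpRequest{
//		HttpMethod:       "POST",
//		RelativeUri:      "/worker",
//		Body:             base64.StdEncoding.EncodeToString([]byte(`{"job":42}`)),
//		AppEngineRouting: &AppEngineRouting{Service: "worker"},
//	}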
// AppEngineRouting: App Engine Routing. Defines routing characteristics
// specific to App Engine - service, version, and instance. For more
// information about services, versions, and instances see An Overview
// of App Engine
// (https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine),
// Microservices Architecture on Google App Engine
// (https://cloud.google.com/appengine/docs/python/microservices-on-app-engine),
// App Engine Standard request routing
// (https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed),
// and App Engine Flex request routing
// (https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed).
// Using AppEngineRouting requires `appengine.applications.get`
// (https://cloud.google.com/appengine/docs/admin-api/access-control)
// Google IAM permission for the project and the following scope:
// `https://www.googleapis.com/auth/cloud-platform`
type AppEngineRouting struct {
// Host: Output only. The host that the task is sent to. The host is
// constructed from the domain name of the app associated with the
// queue's project ID (for example .appspot.com), and the service,
// version, and instance. Tasks which were created using the App Engine
// SDK might have a custom domain name. For more information, see How
// Requests are Routed
// (https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed).
Host string `json:"host,omitempty"`
// Instance: App instance. By default, the task is sent to an instance
// which is available when the task is attempted. Requests can only be
// sent to a specific instance if manual scaling is used in App Engine
// Standard
// (https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine?hl=en_US#scaling_types_and_instance_classes).
// App Engine Flex does not support instances. For more information, see
// App Engine Standard request routing
// (https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed)
// and App Engine Flex request routing
// (https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed).
Instance string `json:"instance,omitempty"`
// Service: App service. By default, the task is sent to the service
// which is the default service when the task is attempted. For some
// queues or tasks which were created using the App Engine Task Queue
// API, host is not parsable into service, version, and instance. For
// example, some tasks which were created using the App Engine SDK use a
// custom domain name; custom domains are not parsed by Cloud Tasks. If
// host is not parsable, then service, version, and instance are the
// empty string.
Service string `json:"service,omitempty"`
// Version: App version. By default, the task is sent to the version
// which is the default version when the task is attempted. For some
// queues or tasks which were created using the App Engine Task Queue
// API, host is not parsable into service, version, and instance. For
// example, some tasks which were created using the App Engine SDK use a
// custom domain name; custom domains are not parsed by Cloud Tasks. If
// host is not parsable, then service, version, and instance are the
// empty string.
Version string `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "Host") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Host") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AppEngineRouting) MarshalJSON() ([]byte, error) {
type NoMethod AppEngineRouting
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Attempt: The status of a task attempt.
type Attempt struct {
// DispatchTime: Output only. The time that this attempt was dispatched.
// `dispatch_time` will be truncated to the nearest microsecond.
DispatchTime string `json:"dispatchTime,omitempty"`
// ResponseStatus: Output only. The response from the worker for this
// attempt. If `response_time` is unset, then the task has not been
// attempted or is currently running and the `response_status` field is
// meaningless.
ResponseStatus *Status `json:"responseStatus,omitempty"`
// ResponseTime: Output only. The time that this attempt response was
// received. `response_time` will be truncated to the nearest
// microsecond.
ResponseTime string `json:"responseTime,omitempty"`
// ScheduleTime: Output only. The time that this attempt was scheduled.
// `schedule_time` will be truncated to the nearest microsecond.
ScheduleTime string `json:"scheduleTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "DispatchTime") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DispatchTime") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Attempt) MarshalJSON() ([]byte, error) {
type NoMethod Attempt
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Binding: Associates `members`, or principals, with a `role`.
type Binding struct {
// Condition: The condition that is associated with this binding. If the
// condition evaluates to `true`, then this binding applies to the
// current request. If the condition evaluates to `false`, then this
// binding does not apply to the current request. However, a different
// role binding might grant the same role to one or more of the
// principals in this binding. To learn which resources support
// conditions in their IAM policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
Condition *Expr `json:"condition,omitempty"`
// Members: Specifies the principals requesting access for a Cloud
// Platform resource. `members` can have the following values: *
// `allUsers`: A special identifier that represents anyone who is on the
// internet; with or without a Google account. *
// `allAuthenticatedUsers`: A special identifier that represents anyone
// who is authenticated with a Google account or a service account. *
// `user:{emailid}`: An email address that represents a specific Google
// account. For example, `[email protected]` . *
// `serviceAccount:{emailid}`: An email address that represents a
// service account. For example,
// `[email protected]`. * `group:{emailid}`: An
// email address that represents a Google group. For example,
// `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An
// email address (plus unique identifier) representing a user that has
// been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the user is
// recovered, this value reverts to `user:{emailid}` and the recovered
// user retains the role in the binding. *
// `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
// (plus unique identifier) representing a service account that has been
// recently deleted. For example,
// `[email protected]?uid=123456789012345678901`.
// If the service account is undeleted, this value reverts to
// `serviceAccount:{emailid}` and the undeleted service account retains
// the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`:
// An email address (plus unique identifier) representing a Google group
// that has been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the group is
// recovered, this value reverts to `group:{emailid}` and the recovered
// group retains the role in the binding. * `domain:{domain}`: The G
// Suite domain (primary) that represents all the users of that domain.
// For example, `google.com` or `example.com`.
Members []string `json:"members,omitempty"`
// Role: Role that is assigned to the list of `members`, or principals.
// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
Role string `json:"role,omitempty"`
// ForceSendFields is a list of field names (e.g. "Condition") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Condition") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Binding) MarshalJSON() ([]byte, error) {
type NoMethod Binding
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
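// A hedged Binding literal mirroring the member formats listed above:
//
//	b := &Binding{
//		Role:    "roles/viewer",
//		Members: []string{"user:[email protected]", "group:[email protected]"},
//	}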
// CreateTaskRequest: Request message for CreateTask.
type CreateTaskRequest struct {
// ResponseView: The response_view specifies which subset of the Task
// will be returned. By default response_view is BASIC; not all
// information is retrieved by default because some data, such as
// payloads, might be desirable to return only when needed because of
// its large size or because of the sensitivity of data that it
// contains. Authorization for FULL requires `cloudtasks.tasks.fullView`
// Google IAM (https://cloud.google.com/iam/) permission on the Task
// resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
ResponseView string `json:"responseView,omitempty"`
// Task: Required. The task to add. Task names have the following
// format:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_
// ID`. The user can optionally specify a task name. If a name is not
// specified then the system will generate a random unique task id,
// which will be set in the task returned in the response. If
// schedule_time is not set or is in the past then Cloud Tasks will set
// it to the current time. Task De-duplication: Explicitly specifying a
// task ID enables task de-duplication. If a task's ID is identical to
// that of an existing task or a task that was deleted or executed
// recently then the call will fail with ALREADY_EXISTS. If the task's
// queue was created using Cloud Tasks, then another task with the same
// name can't be created for ~1hour after the original task was deleted
// or executed. If the task's queue was created using queue.yaml or
// queue.xml, then another task with the same name can't be created for
// ~9days after the original task was deleted or executed. Because there
// is an extra lookup cost to identify duplicate task names, these
// CreateTask calls have significantly increased latency. Using hashed
// strings for the task id or for the prefix of the task id is
// recommended. Choosing task ids that are sequential or have sequential
// prefixes, for example using a timestamp, causes an increase in
// latency and error rates in all task commands. The infrastructure
// relies on an approximately uniform distribution of task ids to store
// and serve tasks efficiently.
Task *Task `json:"task,omitempty"`
// ForceSendFields is a list of field names (e.g. "ResponseView") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ResponseView") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CreateTaskRequest) MarshalJSON() ([]byte, error) {
type NoMethod CreateTaskRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
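// A hedged sketch of a CreateTaskRequest that opts into de-duplication by
// naming the task explicitly (a hashed id is recommended per the comments
// above; the project, location, queue, and task ids are placeholders):
//
//	req := &CreateTaskRequest{
//		ResponseView: "BASIC",
//		Task: &Task{
//			Name: "projects/my-project/locations/us-central1/queues/my-queue/tasks/7f3a9c0e",
//		},
//	}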
// Empty: A generic empty message that you can re-use to avoid defining
// duplicated empty messages in your APIs. A typical example is to use
// it as the request or the response type of an API method. For
// instance: service Foo { rpc Bar(google.protobuf.Empty) returns
// (google.protobuf.Empty); } The JSON representation for `Empty` is
// empty JSON object `{}`.
type Empty struct {
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
}
// Expr: Represents a textual expression in the Common Expression
// Language (CEL) syntax. CEL is a C-like expression language. The
// syntax and semantics of CEL are documented at
// https://github.com/google/cel-spec. Example (Comparison): title:
// "Summary size limit" description: "Determines if a summary is less
// than 100 chars" expression: "document.summary.size() < 100" Example
// (Equality): title: "Requestor is owner" description: "Determines if
// requestor is the document owner" expression: "document.owner ==
// request.auth.claims.email" Example (Logic): title: "Public documents"
// description: "Determine whether the document should be publicly
// visible" expression: "document.type != 'private' && document.type !=
// 'internal'" Example (Data Manipulation): title: "Notification string"
// description: "Create a notification string with a timestamp."
// expression: "'New message received at ' +
// string(document.create_time)" The exact variables and functions that
// may be referenced within an expression are determined by the service
// that evaluates it. See the service documentation for additional
// information.
type Expr struct {
// Description: Optional. Description of the expression. This is a
// longer text which describes the expression, e.g. when hovered over it
// in a UI.
Description string `json:"description,omitempty"`
// Expression: Textual representation of an expression in Common
// Expression Language syntax.
Expression string `json:"expression,omitempty"`
// Location: Optional. String indicating the location of the expression
// for error reporting, e.g. a file name and a position in the file.
Location string `json:"location,omitempty"`
// Title: Optional. Title for the expression, i.e. a short string
// describing its purpose. This can be used e.g. in UIs which allow to
// enter the expression.
Title string `json:"title,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Expr) MarshalJSON() ([]byte, error) {
type NoMethod Expr
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
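// The first example from the comment above, written out as a literal (a sketch):
//
//	expr := &Expr{
//		Title:       "Summary size limit",
//		Description: "Determines if a summary is less than 100 chars",
//		Expression:  "document.summary.size() < 100",
//	}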
// GetIamPolicyRequest: Request message for `GetIamPolicy` method.
type GetIamPolicyRequest struct {
// Options: OPTIONAL: A `GetPolicyOptions` object for specifying options
// to `GetIamPolicy`.
Options *GetPolicyOptions `json:"options,omitempty"`
// ForceSendFields is a list of field names (e.g. "Options") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Options") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) {
type NoMethod GetIamPolicyRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GetPolicyOptions: Encapsulates settings provided to GetIamPolicy.
type GetPolicyOptions struct {
// RequestedPolicyVersion: Optional. The maximum policy version that
// will be used to format the policy. Valid values are 0, 1, and 3.
// Requests specifying an invalid value will be rejected. Requests for
// policies with any conditional role bindings must specify version 3.
// Policies with no conditional role bindings may specify any valid
// value or leave the field unset. The policy in the response might use
// the policy version that you specified, or it might use a lower policy
// version. For example, if you specify version 3, but the policy has no
// conditional role bindings, the response uses version 1. To learn
// which resources support conditions in their IAM policies, see the IAM
// documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "RequestedPolicyVersion") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "RequestedPolicyVersion")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) {
type NoMethod GetPolicyOptions
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// HttpRequest: HTTP request. The task will be pushed to the worker as
// an HTTP request. If the worker or the redirected worker acknowledges
// the task by returning a successful HTTP response code ([`200` -
// `299`]), the task will be removed from the queue. If any other HTTP
// response code is returned or no response is received, the task will
// be retried according to the following: * User-specified throttling:
// retry configuration, rate limits, and the queue's state. * System
// throttling: To prevent the worker from overloading, Cloud Tasks may
// temporarily reduce the queue's effective rate. User-specified
// settings will not be changed. System throttling happens because: *
// Cloud Tasks backs off on all errors. Normally the backoff specified
// in rate limits will be used. But if the worker returns `429` (Too
// Many Requests), `503` (Service Unavailable), or the rate of errors is
// high, Cloud Tasks will use a higher backoff rate. The retry specified
// in the `Retry-After` HTTP response header is considered. * To prevent
// traffic spikes and to smooth sudden increases in traffic, dispatches
// ramp up slowly when the queue is newly created or idle and if large
// numbers of tasks suddenly become available to dispatch (due to spikes
// in create task rates, the queue being unpaused, or many tasks that
// are scheduled at the same time).
type HttpRequest struct {
// Body: HTTP request body. A request body is allowed only if the HTTP
// method is POST, PUT, or PATCH. It is an error to set body on a task
// with an incompatible HttpMethod.
Body string `json:"body,omitempty"`
// Headers: HTTP request headers. This map contains the header field
// names and values. Headers can be set when the task is created. These
// headers represent a subset of the headers that will accompany the
// task's HTTP request. Some HTTP request headers will be ignored or
// replaced. A partial list of headers that will be ignored or replaced
// is: * Host: This will be computed by Cloud Tasks and derived from
// HttpRequest.url. * Content-Length: This will be computed by Cloud
// Tasks. * User-Agent: This will be set to "Google-Cloud-Tasks". *
// `X-Google-*`: Google use only. * `X-AppEngine-*`: Google use only.
// `Content-Type` won't be set by Cloud Tasks. You can explicitly set
// `Content-Type` to a media type when the task is created. For example,
// `Content-Type` can be set to "application/octet-stream" or
// "application/json". Headers which can have multiple values
// (according to RFC2616) can be specified using comma-separated values.
// The size of the headers must be less than 80KB.
Headers map[string]string `json:"headers,omitempty"`
// HttpMethod: The HTTP method to use for the request. The default is
// POST.
//
// Possible values:
// "HTTP_METHOD_UNSPECIFIED" - HTTP method unspecified
// "POST" - HTTP POST
// "GET" - HTTP GET
// "HEAD" - HTTP HEAD
// "PUT" - HTTP PUT
// "DELETE" - HTTP DELETE
// "PATCH" - HTTP PATCH
// "OPTIONS" - HTTP OPTIONS
HttpMethod string `json:"httpMethod,omitempty"`
// OauthToken: If specified, an OAuth token
// (https://developers.google.com/identity/protocols/OAuth2) will be
// generated and attached as an `Authorization` header in the HTTP
// request. This type of authorization should generally only be used
// when calling Google APIs hosted on *.googleapis.com.
OauthToken *OAuthToken `json:"oauthToken,omitempty"`
// OidcToken: If specified, an OIDC
// (https://developers.google.com/identity/protocols/OpenIDConnect)
// token will be generated and attached as an `Authorization` header in
// the HTTP request. This type of authorization can be used for many
// scenarios, including calling Cloud Run, or endpoints where you intend
// to validate the token yourself.
OidcToken *OidcToken `json:"oidcToken,omitempty"`
// Url: Required. The full url path that the request will be sent to.
// This string must begin with either "http://" or "https://". Some
// examples are: `http://acme.com` and `https://acme.com/sales:8080`.
// Cloud Tasks will encode some characters for safety and compatibility.
// The maximum allowed URL length is 2083 characters after encoding. The
// `Location` header response from a redirect response [`300` - `399`]
// may be followed. The redirect is not counted as a separate attempt.
Url string `json:"url,omitempty"`
// ForceSendFields is a list of field names (e.g. "Body") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Body") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *HttpRequest) MarshalJSON() ([]byte, error) {
type NoMethod HttpRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
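// A hedged sketch of an HttpRequest targeting a Cloud Run endpoint with an
// OIDC token (the URL and service account email are placeholders):
//
//	req := &HttpRequest{
//		HttpMethod: "POST",
//		Url:        "https://worker-abc123-uc.a.run.app/process",
//		OidcToken: &OidcToken{
//			ServiceAccountEmail: "tasks-invoker@my-project.iam.gserviceaccount.com",
//		},
//	}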
// ListLocationsResponse: The response message for
// Locations.ListLocations.
type ListLocationsResponse struct {
// Locations: A list of locations that matches the specified filter in
// the request.
Locations []*Location `json:"locations,omitempty"`
// NextPageToken: The standard List next-page token.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Locations") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Locations") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListLocationsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListQueuesResponse: Response message for ListQueues.
type ListQueuesResponse struct {
// NextPageToken: A token to retrieve next page of results. To return
// the next page of results, call ListQueues with this value as the
// page_token. If the next_page_token is empty, there are no more
// results. The page token is valid for only 2 hours.
NextPageToken string `json:"nextPageToken,omitempty"`
// Queues: The list of queues.
Queues []*Queue `json:"queues,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListQueuesResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListQueuesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
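// A hedged pagination sketch: the Queues List call (defined later in this
// file) is assumed to expose the usual generated PageToken/Do methods, and
// parent is a placeholder:
//
//	parent := "projects/my-project/locations/us-central1"
//	for tok := ""; ; {
//		resp, err := cloudtasksService.Projects.Locations.Queues.List(parent).PageToken(tok).Do()
//		if err != nil {
//			return err
//		}
//		// use resp.Queues
//		if tok = resp.NextPageToken; tok == "" {
//			break
//		}
//	}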
// ListTasksResponse: Response message for listing tasks using
// ListTasks.
type ListTasksResponse struct {
// NextPageToken: A token to retrieve next page of results. To return
// the next page of results, call ListTasks with this value as the
// page_token. If the next_page_token is empty, there are no more
// results.
NextPageToken string `json:"nextPageToken,omitempty"`
// Tasks: The list of tasks.
Tasks []*Task `json:"tasks,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListTasksResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListTasksResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Location: A resource that represents Google Cloud Platform location.
type Location struct {
// DisplayName: The friendly name for this location, typically a nearby
// city name. For example, "Tokyo".
DisplayName string `json:"displayName,omitempty"`
// Labels: Cross-service attributes for the location. For example
// {"cloud.googleapis.com/region": "us-east1"}
Labels map[string]string `json:"labels,omitempty"`
// LocationId: The canonical id for this location. For example:
// "us-east1".
LocationId string `json:"locationId,omitempty"`
// Metadata: Service-specific metadata. For example the available
// capacity at the given location.
Metadata googleapi.RawMessage `json:"metadata,omitempty"`
// Name: Resource name for the location, which may vary between
// implementations. For example:
// "projects/example-project/locations/us-east1"
Name string `json:"name,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "DisplayName") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DisplayName") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Location) MarshalJSON() ([]byte, error) {
type NoMethod Location
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OAuthToken: Contains information needed for generating an OAuth token
// (https://developers.google.com/identity/protocols/OAuth2). This type
// of authorization should generally only be used when calling Google
// APIs hosted on *.googleapis.com.
type OAuthToken struct {
// Scope: OAuth scope to be used for generating OAuth access token. If
// not specified, "https://www.googleapis.com/auth/cloud-platform" will
// be used.
Scope string `json:"scope,omitempty"`
// ServiceAccountEmail: Service account email
// (https://cloud.google.com/iam/docs/service-accounts) to be used for
// generating OAuth token. The service account must be within the same
// project as the queue. The caller must have iam.serviceAccounts.actAs
// permission for the service account.
ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"`
// ForceSendFields is a list of field names (e.g. "Scope") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Scope") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *OAuthToken) MarshalJSON() ([]byte, error) {
type NoMethod OAuthToken
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OidcToken: Contains information needed for generating an OpenID
// Connect token
// (https://developers.google.com/identity/protocols/OpenIDConnect).
// This type of authorization can be used for many scenarios, including
// calling Cloud Run, or endpoints where you intend to validate the
// token yourself.
type OidcToken struct {
// Audience: Audience to be used when generating OIDC token. If not
// specified, the URI specified in target will be used.
Audience string `json:"audience,omitempty"`
// ServiceAccountEmail: Service account email
// (https://cloud.google.com/iam/docs/service-accounts) to be used for
// generating OIDC token. The service account must be within the same
// project as the queue. The caller must have iam.serviceAccounts.actAs
// permission for the service account.
ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"`
// ForceSendFields is a list of field names (e.g. "Audience") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Audience") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *OidcToken) MarshalJSON() ([]byte, error) {
type NoMethod OidcToken
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// PauseQueueRequest: Request message for PauseQueue.
type PauseQueueRequest struct {
}
// Policy: An Identity and Access Management (IAM) policy, which
// specifies access controls for Google Cloud resources. A `Policy` is a
// collection of `bindings`. A `binding` binds one or more `members`, or
// principals, to a single `role`. Principals can be user accounts,
// service accounts, Google groups, and domains (such as G Suite). A
// `role` is a named list of permissions; each `role` can be an IAM
// predefined role or a user-created custom role. For some types of
// Google Cloud resources, a `binding` can also specify a `condition`,
// which is a logical expression that allows access to a resource only
// if the expression evaluates to `true`. A condition can add
// constraints based on attributes of the request, the resource, or
// both. To learn which resources support conditions in their IAM
// policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
// **JSON example:** { "bindings": [ { "role":
// "roles/resourcemanager.organizationAdmin", "members": [
// "user:[email protected]", "group:[email protected]",
// "domain:google.com",
// "serviceAccount:[email protected]" ] }, {
// "role": "roles/resourcemanager.organizationViewer", "members": [
// "user:[email protected]" ], "condition": { "title": "expirable access",
// "description": "Does not grant access after Sep 2020", "expression":
// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ],
// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: -
// members: - user:[email protected] - group:[email protected] -
// domain:google.com -
// serviceAccount:[email protected] role:
// roles/resourcemanager.organizationAdmin - members: -
// user:[email protected] role: roles/resourcemanager.organizationViewer
// condition: title: expirable access description: Does not grant access
// after Sep 2020 expression: request.time <
// timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3
// For a description of IAM and its features, see the IAM documentation
// (https://cloud.google.com/iam/docs/).
type Policy struct {
// Bindings: Associates a list of `members`, or principals, with a
// `role`. Optionally, may specify a `condition` that determines how and
// when the `bindings` are applied. Each of the `bindings` must contain
// at least one principal. The `bindings` in a `Policy` can refer to up
// to 1,500 principals; up to 250 of these principals can be Google
// groups. Each occurrence of a principal counts towards these limits.
// For example, if the `bindings` grant 50 different roles to
// `user:[email protected]`, and not to any other principal, then you
// can add another 1,450 principals to the `bindings` in the `Policy`.
Bindings []*Binding `json:"bindings,omitempty"`
// Etag: `etag` is used for optimistic concurrency control as a way to
// help prevent simultaneous updates of a policy from overwriting each
// other. It is strongly suggested that systems make use of the `etag`
// in the read-modify-write cycle to perform policy updates in order to
// avoid race conditions: An `etag` is returned in the response to
// `getIamPolicy`, and systems are expected to put that etag in the
// request to `setIamPolicy` to ensure that their change will be applied
// to the same version of the policy. **Important:** If you use IAM
// Conditions, you must include the `etag` field whenever you call
// `setIamPolicy`. If you omit this field, then IAM allows you to
// overwrite a version `3` policy with a version `1` policy, and all of
// the conditions in the version `3` policy are lost.
Etag string `json:"etag,omitempty"`
// Version: Specifies the format of the policy. Valid values are `0`,
// `1`, and `3`. Requests that specify an invalid value are rejected.
// Any operation that affects conditional role bindings must specify
// version `3`. This requirement applies to the following operations: *
// Getting a policy that includes a conditional role binding * Adding a
// conditional role binding to a policy * Changing a conditional role
// binding in a policy * Removing any role binding, with or without a
// condition, from a policy that includes conditions **Important:** If
// you use IAM Conditions, you must include the `etag` field whenever
// you call `setIamPolicy`. If you omit this field, then IAM allows you
// to overwrite a version `3` policy with a version `1` policy, and all
// of the conditions in the version `3` policy are lost. If a policy
// does not include any conditions, operations on that policy may
// specify any valid version or leave the field unset. To learn which
// resources support conditions in their IAM policies, see the IAM
// documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
Version int64 `json:"version,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Bindings") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Bindings") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Policy) MarshalJSON() ([]byte, error) {
type NoMethod Policy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
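// Editor's sketch (not generated code) of the etag-based read-modify-write
// cycle described above: read the policy with getIamPolicy, mutate it while
// leaving Etag untouched, and send it back via setIamPolicy so a concurrent
// update is rejected rather than silently overwritten. Assumes the Binding
// type defined elsewhere in this package exposes Role and Members fields.
func examplePolicyAddBinding(p *Policy, role, member string) *Policy {
	// p.Etag is deliberately left exactly as returned by getIamPolicy.
	p.Bindings = append(p.Bindings, &Binding{
		Role:    role,
		Members: []string{member},
	})
	return p
}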
// PurgeQueueRequest: Request message for PurgeQueue.
type PurgeQueueRequest struct {
}
// Queue: A queue is a container of related tasks. Queues are configured
// to manage how those tasks are dispatched. Configurable properties
// include rate limits, retry options, queue types, and others.
type Queue struct {
// AppEngineRoutingOverride: Overrides for task-level
// app_engine_routing. These settings apply only to App Engine tasks in
// this queue. HTTP tasks are not affected. If set,
// `app_engine_routing_override` is used for all App Engine tasks in the
// queue, no matter what the setting is for the task-level
// app_engine_routing.
AppEngineRoutingOverride *AppEngineRouting `json:"appEngineRoutingOverride,omitempty"`
// Name: Caller-specified and required in CreateQueue, after which it
// becomes output only. The queue name. The queue name must have the
// following format:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` *
// `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens
// (-), colons (:), or periods (.). For more information, see
// Identifying projects
// (https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
// * `LOCATION_ID` is the canonical ID for the queue's location. The
// list of available locations can be obtained by calling ListLocations.
// For more information, see https://cloud.google.com/about/locations/.
// * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or
// hyphens (-). The maximum length is 100 characters.
Name string `json:"name,omitempty"`
// PurgeTime: Output only. The last time this queue was purged. All
// tasks that were created before this time were purged. A queue can be
// purged using PurgeQueue, the App Engine Task Queue SDK, or the Cloud
// Console
// (https://cloud.google.com/appengine/docs/standard/python/taskqueue/push/deleting-tasks-and-queues#purging_all_tasks_from_a_queue).
// Purge time will be truncated to the nearest microsecond. Purge time
// will be unset if the queue has never been purged.
PurgeTime string `json:"purgeTime,omitempty"`
// RateLimits: Rate limits for task dispatches. rate_limits and
// retry_config are related because they both control task attempts.
// However, they control task attempts in different ways: * rate_limits
// controls the total rate of dispatches from a queue (i.e. all traffic
// dispatched from the queue, regardless of whether the dispatch is from
// a first attempt or a retry). * retry_config controls what happens to
// a particular task after its first attempt fails. That is,
// retry_config controls task retries (the second attempt, third
// attempt, etc). The queue's actual dispatch rate is the result of: *
// Number of tasks in the queue * User-specified throttling:
// rate_limits, retry_config, and the queue's state. * System throttling
// due to `429` (Too Many Requests) or `503` (Service Unavailable)
// responses from the worker, high error rates, or to smooth sudden
// large traffic spikes.
RateLimits *RateLimits `json:"rateLimits,omitempty"`
// RetryConfig: Settings that determine the retry behavior. * For tasks
// created using Cloud Tasks: the queue-level retry settings apply to
// all tasks in the queue that were created using Cloud Tasks. Retry
// settings cannot be set on individual tasks. * For tasks created using
// the App Engine SDK: the queue-level retry settings apply to all tasks
// in the queue which do not have retry settings explicitly set on the
// task and were created by the App Engine SDK. See App Engine
// documentation
// (https://cloud.google.com/appengine/docs/standard/python/taskqueue/push/retrying-tasks).
RetryConfig *RetryConfig `json:"retryConfig,omitempty"`
// StackdriverLoggingConfig: Configuration options for writing logs to
// Stackdriver Logging (https://cloud.google.com/logging/docs/). If this
// field is unset, then no logs are written.
StackdriverLoggingConfig *StackdriverLoggingConfig `json:"stackdriverLoggingConfig,omitempty"`
// State: Output only. The state of the queue. `state` can only be
// changed by calling PauseQueue, ResumeQueue, or uploading
// queue.yaml/xml
// (https://cloud.google.com/appengine/docs/python/config/queueref).
// UpdateQueue cannot be used to change `state`.
//
// Possible values:
// "STATE_UNSPECIFIED" - Unspecified state.
// "RUNNING" - The queue is running. Tasks can be dispatched. If the
// queue was created using Cloud Tasks and the queue has had no activity
// (method calls or task dispatches) for 30 days, the queue may take a
// few minutes to re-activate. Some method calls may return NOT_FOUND
// and tasks may not be dispatched for a few minutes until the queue has
// been re-activated.
// "PAUSED" - Tasks are paused by the user. If the queue is paused
// then Cloud Tasks will stop delivering tasks from it, but more tasks
// can still be added to it by the user.
// "DISABLED" - The queue is disabled. A queue becomes `DISABLED` when
// [queue.yaml](https://cloud.google.com/appengine/docs/python/config/queueref)
// or
// [queue.xml](https://cloud.google.com/appengine/docs/standard/java/config/queueref)
// is uploaded which does not contain the queue. You cannot
// directly disable a queue. When a queue is disabled, tasks can still
// be added to a queue but the tasks are not dispatched. To permanently
// delete this queue and all of its tasks, call DeleteQueue.
State string `json:"state,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g.
// "AppEngineRoutingOverride") to unconditionally include in API
// requests. By default, fields with empty or default values are omitted
// from API requests. However, any non-pointer, non-interface field
// appearing in ForceSendFields will be sent to the server regardless of
// whether the field is empty or not. This may be used to include empty
// fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngineRoutingOverride")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Queue) MarshalJSON() ([]byte, error) {
type NoMethod Queue
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
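// Editor's sketch (not generated code): how the Queue fields documented
// above fit together when creating a queue. All literal values are
// illustrative; unset fields take service-chosen defaults.
func exampleNewQueue(parent string) *Queue {
	return &Queue{
		// Caller-specified in CreateQueue, output only afterwards.
		Name: parent + "/queues/my-queue", // hypothetical QUEUE_ID
		RateLimits: &RateLimits{
			MaxDispatchesPerSecond:  10,  // maximum allowed value is 500
			MaxConcurrentDispatches: 100, // maximum allowed value is 5,000
		},
		RetryConfig: &RetryConfig{
			MaxAttempts: 5,      // -1 would mean unlimited attempts
			MinBackoff:  "1s",   // duration string, truncated to seconds
			MaxBackoff:  "300s", // duration string, truncated to seconds
		},
	}
}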
// RateLimits: Rate limits. This message determines the maximum rate
// that tasks can be dispatched by a queue, regardless of whether the
// dispatch is a first task attempt or a retry. Note: The debugging
// command, RunTask, will run a task even if the queue has reached its
// RateLimits.
type RateLimits struct {
// MaxBurstSize: Output only. The max burst size. Max burst size limits
// how fast tasks in queue are processed when many tasks are in the
// queue and the rate is high. This field allows the queue to have a
// high rate so processing starts shortly after a task is enqueued, but
// still limits resource usage when many tasks are enqueued in a short
// period of time. The token bucket
// (https://wikipedia.org/wiki/Token_Bucket) algorithm is used to
// control the rate of task dispatches. Each queue has a token bucket
// that holds tokens, up to the maximum specified by `max_burst_size`.
// Each time a task is dispatched, a token is removed from the bucket.
// Tasks will be dispatched until the queue's bucket runs out of tokens.
// The bucket will be continuously refilled with new tokens based on
// max_dispatches_per_second. Cloud Tasks will pick the value of
// `max_burst_size` based on the value of max_dispatches_per_second. For
// queues that were created or updated using `queue.yaml/xml`,
// `max_burst_size` is equal to bucket_size
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#bucket_size).
// Since `max_burst_size` is output only, if UpdateQueue is called on a
// queue created by `queue.yaml/xml`, `max_burst_size` will be reset
// based on the value of max_dispatches_per_second, regardless of
// whether max_dispatches_per_second is updated.
MaxBurstSize int64 `json:"maxBurstSize,omitempty"`
// MaxConcurrentDispatches: The maximum number of concurrent tasks that
// Cloud Tasks allows to be dispatched for this queue. After this
// threshold has been reached, Cloud Tasks stops dispatching tasks until
// the number of concurrent requests decreases. If unspecified when the
// queue is created, Cloud Tasks will pick the default. The maximum
// allowed value is 5,000. This field has the same meaning as
// max_concurrent_requests in queue.yaml/xml
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#max_concurrent_requests).
MaxConcurrentDispatches int64 `json:"maxConcurrentDispatches,omitempty"`
// MaxDispatchesPerSecond: The maximum rate at which tasks are
// dispatched from this queue. If unspecified when the queue is created,
// Cloud Tasks will pick the default. * The maximum allowed value is
// 500. This field has the same meaning as rate in queue.yaml/xml
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#rate).
MaxDispatchesPerSecond float64 `json:"maxDispatchesPerSecond,omitempty"`
// ForceSendFields is a list of field names (e.g. "MaxBurstSize") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MaxBurstSize") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *RateLimits) MarshalJSON() ([]byte, error) {
type NoMethod RateLimits
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *RateLimits) UnmarshalJSON(data []byte) error {
type NoMethod RateLimits
var s1 struct {
MaxDispatchesPerSecond gensupport.JSONFloat64 `json:"maxDispatchesPerSecond"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.MaxDispatchesPerSecond = float64(s1.MaxDispatchesPerSecond)
return nil
}
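// Editor's sketch (not generated code): a toy token bucket mirroring the
// dispatch model described on MaxBurstSize. The bucket holds at most
// maxBurstSize tokens, each dispatch consumes one, and tokens refill at
// maxDispatchesPerSecond; this is illustrative only, since the service's
// actual implementation is not exposed by this API.
type exampleTokenBucket struct {
	tokens       float64 // current token count
	maxBurstSize float64 // bucket capacity (max_burst_size)
	refillPerSec float64 // refill rate (max_dispatches_per_second)
}

// allowDispatch refills the bucket for elapsedSec seconds, then reports
// whether one task may be dispatched, consuming a token if so.
func (b *exampleTokenBucket) allowDispatch(elapsedSec float64) bool {
	b.tokens += b.refillPerSec * elapsedSec
	if b.tokens > b.maxBurstSize {
		b.tokens = b.maxBurstSize
	}
	if b.tokens < 1 {
		return false
	}
	b.tokens--
	return true
}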
// ResumeQueueRequest: Request message for ResumeQueue.
type ResumeQueueRequest struct {
}
// RetryConfig: Retry config. These settings determine when a failed
// task attempt is retried.
type RetryConfig struct {
// MaxAttempts: Number of attempts per task. Cloud Tasks will attempt
// the task `max_attempts` times (that is, if the first attempt fails,
// then there will be `max_attempts - 1` retries). Must be >= -1. If
// unspecified when the queue is created, Cloud Tasks will pick the
// default. -1 indicates unlimited attempts. This field has the same
// meaning as task_retry_limit in queue.yaml/xml
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
MaxAttempts int64 `json:"maxAttempts,omitempty"`
// MaxBackoff: A task will be scheduled for retry between min_backoff
// and max_backoff duration after it fails, if the queue's RetryConfig
// specifies that the task should be retried. If unspecified when the
// queue is created, Cloud Tasks will pick the default. `max_backoff`
// will be truncated to the nearest second. This field has the same
// meaning as max_backoff_seconds in queue.yaml/xml
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
MaxBackoff string `json:"maxBackoff,omitempty"`
// MaxDoublings: The time between retries will double `max_doublings`
// times. A task's retry interval starts at min_backoff, then doubles
// `max_doublings` times, then increases linearly, and finally retries
// at intervals of max_backoff up to max_attempts times. For example, if
// min_backoff is 10s, max_backoff is 300s, and `max_doublings` is 3,
// then a task will first be retried in 10s. The retry interval will
// double three times, and then increase linearly by 2^3 * 10s. Finally,
// the task will retry at intervals of max_backoff until the task has
// been attempted max_attempts times. Thus, the requests will retry at
// 10s, 20s, 40s, 80s, 160s, 240s, 300s, 300s, .... If unspecified when
// the queue is created, Cloud Tasks will pick the default. This field
// has the same meaning as max_doublings in queue.yaml/xml
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
MaxDoublings int64 `json:"maxDoublings,omitempty"`
// MaxRetryDuration: If positive, `max_retry_duration` specifies the
// time limit for retrying a failed task, measured from when the task
// was first attempted. Once `max_retry_duration` time has passed *and*
// the task has been attempted max_attempts times, no further attempts
// will be made and the task will be deleted. If zero, then the task age
// is unlimited. If unspecified when the queue is created, Cloud Tasks
// will pick the default. `max_retry_duration` will be truncated to the
// nearest second. This field has the same meaning as task_age_limit in
// queue.yaml/xml
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
MaxRetryDuration string `json:"maxRetryDuration,omitempty"`
// MinBackoff: A task will be scheduled for retry between min_backoff
// and max_backoff duration after it fails, if the queue's RetryConfig
// specifies that the task should be retried. If unspecified when the
// queue is created, Cloud Tasks will pick the default. `min_backoff`
// will be truncated to the nearest second. This field has the same
// meaning as min_backoff_seconds in queue.yaml/xml
// (https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
MinBackoff string `json:"minBackoff,omitempty"`
// ForceSendFields is a list of field names (e.g. "MaxAttempts") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MaxAttempts") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *RetryConfig) MarshalJSON() ([]byte, error) {
type NoMethod RetryConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
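// Editor's sketch (not generated code) of the retry schedule described on
// MaxDoublings: the interval starts at minBackoff, doubles maxDoublings
// times, then grows linearly by 2^maxDoublings * minBackoff, capped at
// maxBackoff. With minBackoff=10, maxBackoff=300, maxDoublings=3 this yields
// 10, 20, 40, 80, 160, 240, 300, 300, ... seconds, matching the example in
// the comment above.
func exampleRetryIntervals(minBackoff, maxBackoff float64, maxDoublings, attempts int) []float64 {
	intervals := make([]float64, 0, attempts)
	interval := minBackoff
	// After maxDoublings doublings, growth becomes linear in steps of
	// 2^maxDoublings * minBackoff.
	linearStep := minBackoff * float64(int(1)<<uint(maxDoublings))
	for i := 0; i < attempts; i++ {
		capped := interval
		if capped > maxBackoff {
			capped = maxBackoff
		}
		intervals = append(intervals, capped)
		if i < maxDoublings {
			interval *= 2
		} else {
			interval += linearStep
		}
	}
	return intervals
}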
// RunTaskRequest: Request message for forcing a task to run now using
// RunTask.
type RunTaskRequest struct {
// ResponseView: The response_view specifies which subset of the Task
// will be returned. By default, response_view is BASIC; not all
// information is retrieved by default because some data, such as
// payloads, can be large or sensitive and is best returned only when
// needed. Authorization for FULL requires `cloudtasks.tasks.fullView`
// Google IAM (https://cloud.google.com/iam/) permission on the Task
// resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
ResponseView string `json:"responseView,omitempty"`
// ForceSendFields is a list of field names (e.g. "ResponseView") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ResponseView") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *RunTaskRequest) MarshalJSON() ([]byte, error) {
type NoMethod RunTaskRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SetIamPolicyRequest: Request message for `SetIamPolicy` method.
type SetIamPolicyRequest struct {
// Policy: REQUIRED: The complete policy to be applied to the
// `resource`. The size of the policy is limited to a few tens of
// kilobytes. An
// empty policy is a valid policy but certain Cloud Platform services
// (such as Projects) might reject them.
Policy *Policy `json:"policy,omitempty"`
// ForceSendFields is a list of field names (e.g. "Policy") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Policy") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) {
type NoMethod SetIamPolicyRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// StackdriverLoggingConfig: Configuration options for writing logs to
// Stackdriver Logging (https://cloud.google.com/logging/docs/).
type StackdriverLoggingConfig struct {
// SamplingRatio: Specifies the fraction of operations to write to
// Stackdriver Logging (https://cloud.google.com/logging/docs/). This
// field may contain any value between 0.0 and 1.0, inclusive. 0.0 is
// the default and means that no operations are logged.
SamplingRatio float64 `json:"samplingRatio,omitempty"`
// ForceSendFields is a list of field names (e.g. "SamplingRatio") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "SamplingRatio") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *StackdriverLoggingConfig) MarshalJSON() ([]byte, error) {
type NoMethod StackdriverLoggingConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *StackdriverLoggingConfig) UnmarshalJSON(data []byte) error {
type NoMethod StackdriverLoggingConfig
var s1 struct {
SamplingRatio gensupport.JSONFloat64 `json:"samplingRatio"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.SamplingRatio = float64(s1.SamplingRatio)
return nil
}
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details. You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors).
type Status struct {
// Code: The status code, which should be an enum value of
// google.rpc.Code.
Code int64 `json:"code,omitempty"`
// Details: A list of messages that carry the error details. There is a
// common set of message types for APIs to use.
Details []googleapi.RawMessage `json:"details,omitempty"`
// Message: A developer-facing error message, which should be in
// English. Any user-facing error message should be localized and sent
// in the google.rpc.Status.details field, or localized by the client.
Message string `json:"message,omitempty"`
// ForceSendFields is a list of field names (e.g. "Code") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Code") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Status) MarshalJSON() ([]byte, error) {
type NoMethod Status
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Task: A unit of scheduled work.
type Task struct {
// AppEngineHttpRequest: HTTP request that is sent to the App Engine app
// handler. An App Engine task is a task that has AppEngineHttpRequest
// set.
AppEngineHttpRequest *AppEngineHttpRequest `json:"appEngineHttpRequest,omitempty"`
// CreateTime: Output only. The time that the task was created.
// `create_time` will be truncated to the nearest second.
CreateTime string `json:"createTime,omitempty"`
// DispatchCount: Output only. The number of attempts dispatched. This
// count includes attempts which have been dispatched but haven't
// received a response.
DispatchCount int64 `json:"dispatchCount,omitempty"`
// DispatchDeadline: The deadline for requests sent to the worker. If
// the worker does not respond by this deadline then the request is
// cancelled and the attempt is marked as a `DEADLINE_EXCEEDED` failure.
// Cloud Tasks will retry the task according to the RetryConfig. Note
// that when the request is cancelled, Cloud Tasks will stop listening
// for the response, but whether the worker stops processing depends on
// the worker. For example, if the worker is stuck, it may not react to
// cancelled requests. The default and maximum values depend on the type
// of request: * For HTTP tasks, the default is 10 minutes. The deadline
// must be in the interval [15 seconds, 30 minutes]. * For App Engine
// tasks, 0 indicates that the request has the default deadline. The
// default deadline depends on the scaling type
// (https://cloud.google.com/appengine/docs/standard/go/how-instances-are-managed#instance_scaling)
// of the service: 10 minutes for standard apps with automatic scaling,
// 24 hours for standard apps with manual and basic scaling, and 60
// minutes for flex apps. If the request deadline is set, it must be in
// the interval [15 seconds, 24 hours 15 seconds]. Regardless of the
// task's `dispatch_deadline`, the app handler will not run for longer
// than the service's timeout. We recommend setting the
// `dispatch_deadline` to at most a few seconds more than the app
// handler's timeout. For more information see Timeouts
// (https://cloud.google.com/tasks/docs/creating-appengine-handlers#timeouts).
// `dispatch_deadline` will be truncated to the nearest millisecond. The
// deadline is an approximate deadline.
DispatchDeadline string `json:"dispatchDeadline,omitempty"`
// FirstAttempt: Output only. The status of the task's first attempt.
// Only dispatch_time will be set. The other Attempt information is not
// retained by Cloud Tasks.
FirstAttempt *Attempt `json:"firstAttempt,omitempty"`
// HttpRequest: HTTP request that is sent to the worker. An HTTP task is
// a task that has HttpRequest set.
HttpRequest *HttpRequest `json:"httpRequest,omitempty"`
// LastAttempt: Output only. The status of the task's last attempt.
LastAttempt *Attempt `json:"lastAttempt,omitempty"`
// Name: Optionally caller-specified in CreateTask. The task name. The
// task name must have the following format:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
// * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]),
// hyphens (-), colons (:), or periods (.). For more information, see
// Identifying projects
// (https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
// * `LOCATION_ID` is the canonical ID for the task's location. The list
// of available locations can be obtained by calling ListLocations. For
// more information, see https://cloud.google.com/about/locations/. *
// `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or
// hyphens (-). The maximum length is 100 characters. * `TASK_ID` can
// contain only letters ([A-Za-z]), numbers ([0-9]), hyphens (-), or
// underscores (_). The maximum length is 500 characters.
Name string `json:"name,omitempty"`
// ResponseCount: Output only. The number of attempts which have
// received a response.
ResponseCount int64 `json:"responseCount,omitempty"`
// ScheduleTime: The time when the task is scheduled to be attempted or
// retried. `schedule_time` will be truncated to the nearest
// microsecond.
ScheduleTime string `json:"scheduleTime,omitempty"`
// View: Output only. The view specifies which subset of the Task has
// been returned.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
View string `json:"view,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g.
// "AppEngineHttpRequest") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngineHttpRequest") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Task) MarshalJSON() ([]byte, error) {
type NoMethod Task
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
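// Editor's sketch (not generated code): an HTTP task tying together the Task
// type above and the OidcToken type documented earlier. Field names follow
// the HttpRequest type defined elsewhere in this package; URL and email
// values are placeholders.
func exampleNewHTTPTask() *Task {
	return &Task{
		HttpRequest: &HttpRequest{
			Url:        "https://worker.example.com/handle", // hypothetical worker endpoint
			HttpMethod: "POST",
			OidcToken: &OidcToken{
				ServiceAccountEmail: "[email protected]", // placeholder
			},
		},
		// For HTTP tasks the deadline must be in [15 seconds, 30 minutes].
		DispatchDeadline: "600s",
	}
}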
// TestIamPermissionsRequest: Request message for `TestIamPermissions`
// method.
type TestIamPermissionsRequest struct {
// Permissions: The set of permissions to check for the `resource`.
// Permissions with wildcards (such as '*' or 'storage.*') are not
// allowed. For more information see IAM Overview
// (https://cloud.google.com/iam/docs/overview#permissions).
Permissions []string `json:"permissions,omitempty"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) {
type NoMethod TestIamPermissionsRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TestIamPermissionsResponse: Response message for `TestIamPermissions`
// method.
type TestIamPermissionsResponse struct {
// Permissions: A subset of `TestPermissionsRequest.permissions` that
// the caller is allowed.
Permissions []string `json:"permissions,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
type NoMethod TestIamPermissionsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "cloudtasks.projects.locations.get":
type ProjectsLocationsGetCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets information about a location.
//
// - name: Resource name for the location.
func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall {
c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.get" call.
// Exactly one of *Location or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Location.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Location{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets information about a location.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name for the location.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Location"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
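// Editor's sketch (not generated code): typical caller code for the Get call
// above. It assumes this package's usual generated accessor chain
// (svc.Projects.Locations) and a NewService-constructed *Service.
func exampleGetLocation(ctx context.Context, svc *Service) (*Location, error) {
	name := "projects/my-project/locations/us-central1" // placeholder resource name
	return svc.Projects.Locations.Get(name).Context(ctx).Do()
}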
// method id "cloudtasks.projects.locations.list":
type ProjectsLocationsListCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists information about the supported locations for this
// service.
//
// - name: The resource that owns the locations collection, if
// applicable.
func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall {
c := &ProjectsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Filter sets the optional parameter "filter": A filter to narrow down
// results to a preferred subset. The filtering language accepts strings
// like "displayName=tokyo", and is documented in more detail in AIP-160
// (https://google.aip.dev/160).
func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return. If not set, the service selects a default.
func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": A page token
// received from the `next_page_token` field in the response. Send that
// page token to receive the subsequent page.
func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/locations")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.list" call.
// Exactly one of *ListLocationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLocationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLocationsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists information about the supported locations for this service.",
// "flatPath": "v2/projects/{projectsId}/locations",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "filter": {
// "description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like \"displayName=tokyo\", and is documented in more detail in [AIP-160](https://google.aip.dev/160).",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The resource that owns the locations collection, if applicable.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "The maximum number of results to return. If not set, the service selects a default.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+name}/locations",
// "response": {
// "$ref": "ListLocationsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
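// Editor's sketch (not generated code): iterating every page with Pages. It
// assumes ListLocationsResponse exposes a Locations slice whose elements
// have a Name field, as in other generated list responses in this package.
func exampleListAllLocations(ctx context.Context, svc *Service) error {
	return svc.Projects.Locations.
		List("projects/my-project"). // placeholder resource name
		PageSize(100).
		Pages(ctx, func(page *ListLocationsResponse) error {
			for _, loc := range page.Locations {
				fmt.Println(loc.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
}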
// method id "cloudtasks.projects.locations.queues.create":
type ProjectsLocationsQueuesCreateCall struct {
s *Service
parent string
queue *Queue
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a queue. Queues created with this method allow tasks
// to live for a maximum of 31 days. After a task is 31 days old, the
// task will be deleted regardless of whether it was dispatched or not.
// WARNING: Using this method may have unintended side effects if you
// are using an App Engine `queue.yaml` or `queue.xml` file to manage
// your queues. Read Overview of Queue Management and queue.yaml
// (https://cloud.google.com/tasks/docs/queue-yaml) before using this
// method.
//
// - parent: The location name in which the queue will be created. For
// example: `projects/PROJECT_ID/locations/LOCATION_ID` The list of
// allowed locations can be obtained by calling Cloud Tasks'
// implementation of ListLocations.
func (r *ProjectsLocationsQueuesService) Create(parent string, queue *Queue) *ProjectsLocationsQueuesCreateCall {
c := &ProjectsLocationsQueuesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.queue = queue
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesCreateCall) Context(ctx context.Context) *ProjectsLocationsQueuesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.queue)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/queues")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.create" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesCreateCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a queue. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The location name in which the queue will be created. For example: `projects/PROJECT_ID/locations/LOCATION_ID` The list of allowed locations can be obtained by calling Cloud Tasks' implementation of ListLocations.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/queues",
// "request": {
// "$ref": "Queue"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
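// Editor's sketch (not generated code): creating a queue with the Create
// call above, reusing the hypothetical exampleNewQueue helper from the
// editorial notes earlier in this file. The parent string is a placeholder.
func exampleCreateQueue(ctx context.Context, svc *Service) (*Queue, error) {
	parent := "projects/my-project/locations/us-central1"
	return svc.Projects.Locations.Queues.
		Create(parent, exampleNewQueue(parent)).
		Context(ctx).
		Do()
}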
// method id "cloudtasks.projects.locations.queues.delete":
type ProjectsLocationsQueuesDeleteCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a queue. This command will delete the queue even if
// it has tasks in it. Note: If you delete a queue, a queue with the
// same name can't be created for 7 days. WARNING: Using this method may
// have unintended side effects if you are using an App Engine
// `queue.yaml` or `queue.xml` file to manage your queues. Read Overview
// of Queue Management and queue.yaml
// (https://cloud.google.com/tasks/docs/queue-yaml) before using this
// method.
//
// - name: The queue name. For example:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`.
func (r *ProjectsLocationsQueuesService) Delete(name string) *ProjectsLocationsQueuesDeleteCall {
c := &ProjectsLocationsQueuesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesDeleteCall) Context(ctx context.Context) *ProjectsLocationsQueuesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}",
// "httpMethod": "DELETE",
// "id": "cloudtasks.projects.locations.queues.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
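
// Illustrative, hand-written sketch (not generated code) showing how the
// Delete call above is typically driven. It assumes this package's
// NewService constructor; the queue name is a placeholder. Note that a
// deleted queue's name cannot be reused for 7 days.
func exampleQueuesDelete(ctx context.Context) error {
	svc, err := NewService(ctx) // add auth options as needed
	if err != nil {
		return err
	}
	name := "projects/my-project/locations/us-central1/queues/my-queue"
	_, err = svc.Projects.Locations.Queues.Delete(name).Context(ctx).Do()
	return err
}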
// method id "cloudtasks.projects.locations.queues.get":
type ProjectsLocationsQueuesGetCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a queue.
//
// - name: The resource name of the queue. For example:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`.
func (r *ProjectsLocationsQueuesService) Get(name string) *ProjectsLocationsQueuesGetCall {
c := &ProjectsLocationsQueuesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesGetCall) Context(ctx context.Context) *ProjectsLocationsQueuesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.get" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesGetCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a queue.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The resource name of the queue. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
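
// Illustrative sketch (not generated code): fetching a queue and using
// IfNoneMatch with googleapi.IsNotModified, as described above. The etag
// value and queue name are placeholders.
func exampleQueuesGet(ctx context.Context, etag string) (*Queue, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	name := "projects/my-project/locations/us-central1/queues/my-queue"
	q, err := svc.Projects.Locations.Queues.Get(name).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since the last fetch
	}
	return q, err
}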
// method id "cloudtasks.projects.locations.queues.getIamPolicy":
type ProjectsLocationsQueuesGetIamPolicyCall struct {
s *Service
resource string
getiampolicyrequest *GetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// GetIamPolicy: Gets the access control policy for a Queue. Returns an
// empty policy if the resource exists and does not have a policy set.
// Authorization requires the following Google IAM
// (https://cloud.google.com/iam) permission on the specified resource
// parent: * `cloudtasks.queues.getIamPolicy`
//
// - resource: REQUIRED: The resource for which the policy is being
// requested. See the operation documentation for the appropriate
// value for this field.
func (r *ProjectsLocationsQueuesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsLocationsQueuesGetIamPolicyCall {
c := &ProjectsLocationsQueuesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.getiampolicyrequest = getiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesGetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsQueuesGetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:getIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets the access control policy for a Queue. Returns an empty policy if the resource exists and does not have a policy set. Authorization requires the following [Google IAM](https://cloud.google.com/iam) permission on the specified resource parent: * `cloudtasks.queues.getIamPolicy`",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:getIamPolicy",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.getIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+resource}:getIamPolicy",
// "request": {
// "$ref": "GetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
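
// Illustrative sketch: reading a queue's IAM policy. The request body may
// be empty; the resource name is a placeholder. Per the comment above,
// the caller needs the cloudtasks.queues.getIamPolicy permission.
func exampleQueuesGetIamPolicy(ctx context.Context) (*Policy, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	resource := "projects/my-project/locations/us-central1/queues/my-queue"
	return svc.Projects.Locations.Queues.GetIamPolicy(resource, &GetIamPolicyRequest{}).Context(ctx).Do()
}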
// method id "cloudtasks.projects.locations.queues.list":
type ProjectsLocationsQueuesListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists queues. Queues are returned in lexicographical order.
//
// - parent: The location name. For example:
// `projects/PROJECT_ID/locations/LOCATION_ID`.
func (r *ProjectsLocationsQueuesService) List(parent string) *ProjectsLocationsQueuesListCall {
c := &ProjectsLocationsQueuesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// Filter sets the optional parameter "filter": `filter` can be used to
// specify a subset of queues. Any Queue field can be used as a filter
// and several operators are supported. For example: `<=, <, >=, >, !=,
// =, :`. The filter syntax is the same as described in Stackdriver's
// Advanced Logs Filters
// (https://cloud.google.com/logging/docs/view/advanced_filters). Sample
// filter "state: PAUSED". Note that using filters might cause fewer
// queues than the requested page_size to be returned.
func (c *ProjectsLocationsQueuesListCall) Filter(filter string) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": Requested page size.
// The maximum page size is 9800. If unspecified, the page size will be
// the maximum. Fewer queues than requested might be returned, even if
// more queues exist; use the next_page_token in the response to
// determine if more queues exist.
func (c *ProjectsLocationsQueuesListCall) PageSize(pageSize int64) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": A token
// identifying the page of results to return. To request the first page
// results, page_token must be empty. To request the next page of
// results, page_token must be the value of next_page_token returned
// from the previous call to ListQueues method. It is an error to switch
// the value of the filter while iterating through pages.
func (c *ProjectsLocationsQueuesListCall) PageToken(pageToken string) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesListCall) Context(ctx context.Context) *ProjectsLocationsQueuesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/queues")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.list" call.
// Exactly one of *ListQueuesResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListQueuesResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsQueuesListCall) Do(opts ...googleapi.CallOption) (*ListQueuesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListQueuesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists queues. Queues are returned in lexicographical order.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "filter": {
// "description": "`filter` can be used to specify a subset of queues. Any Queue field can be used as a filter and several operators as supported. For example: `\u003c=, \u003c, \u003e=, \u003e, !=, =, :`. The filter syntax is the same as described in [Stackdriver's Advanced Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). Sample filter \"state: PAUSED\". Note that using filters might cause fewer queues than the requested page_size to be returned.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "Requested page size. The maximum page size is 9800. If unspecified, the page size will be the maximum. Fewer queues than requested might be returned, even if more queues exist; use the next_page_token in the response to determine if more queues exist.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "A token identifying the page of results to return. To request the first page results, page_token must be empty. To request the next page of results, page_token must be the value of next_page_token returned from the previous call to ListQueues method. It is an error to switch the value of the filter while iterating through pages.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The location name. For example: `projects/PROJECT_ID/locations/LOCATION_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/queues",
// "response": {
// "$ref": "ListQueuesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsQueuesListCall) Pages(ctx context.Context, f func(*ListQueuesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
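
// Illustrative sketch: listing queues with the Pages helper above, which
// handles the pageToken plumbing. The parent and filter are placeholders;
// per the Filter comment, filtered pages may come back shorter than the
// requested pageSize. Field names are assumed from the v2 surface.
func exampleQueuesList(ctx context.Context) error {
	svc, err := NewService(ctx)
	if err != nil {
		return err
	}
	parent := "projects/my-project/locations/us-central1"
	call := svc.Projects.Locations.Queues.List(parent).Filter("state: PAUSED").PageSize(100)
	return call.Pages(ctx, func(page *ListQueuesResponse) error {
		for _, q := range page.Queues {
			fmt.Println(q.Name, q.State)
		}
		return nil
	})
}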
// method id "cloudtasks.projects.locations.queues.patch":
type ProjectsLocationsQueuesPatchCall struct {
s *Service
name string
queue *Queue
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates a queue. This method creates the queue if it does not
// exist and updates the queue if it does exist. Queues created with
// this method allow tasks to live for a maximum of 31 days. After a
// task is 31 days old, the task will be deleted regardless of whether
// it was dispatched or not. WARNING: Using this method may have
// unintended side effects if you are using an App Engine `queue.yaml`
// or `queue.xml` file to manage your queues. Read Overview of Queue
// Management and queue.yaml
// (https://cloud.google.com/tasks/docs/queue-yaml) before using this
// method.
//
// - name: Caller-specified and required in CreateQueue, after which it
// becomes output only. The queue name. The queue name must have the
// following format:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` *
// `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]),
// hyphens (-), colons (:), or periods (.). For more information, see
// Identifying projects
// (https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
// * `LOCATION_ID` is the canonical ID for the queue's location. The
// list of available locations can be obtained by calling
// ListLocations. For more information, see
// https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain
// letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum
// length is 100 characters.
func (r *ProjectsLocationsQueuesService) Patch(name string, queue *Queue) *ProjectsLocationsQueuesPatchCall {
c := &ProjectsLocationsQueuesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.queue = queue
return c
}
// UpdateMask sets the optional parameter "updateMask": A mask used to
// specify which fields of the queue are being updated. If empty, then
// all fields will be updated.
func (c *ProjectsLocationsQueuesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsQueuesPatchCall {
c.urlParams_.Set("updateMask", updateMask)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesPatchCall) Context(ctx context.Context) *ProjectsLocationsQueuesPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.queue)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.patch" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesPatchCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a queue. This method creates the queue if it does not exist and updates the queue if it does exist. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}",
// "httpMethod": "PATCH",
// "id": "cloudtasks.projects.locations.queues.patch",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Caller-specified and required in CreateQueue, after which it becomes output only. The queue name. The queue name must have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the queue's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// },
// "updateMask": {
// "description": "A mask used to specify which fields of the queue are being updated. If empty, then all fields will be updated.",
// "format": "google-fieldmask",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "request": {
// "$ref": "Queue"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
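
// Illustrative sketch: updating a single field with UpdateMask so other
// queue settings are left untouched. The RateLimits field and mask path
// are assumed from the cloudtasks v2 surface; the name is a placeholder.
func exampleQueuesPatch(ctx context.Context) (*Queue, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	name := "projects/my-project/locations/us-central1/queues/my-queue"
	q := &Queue{RateLimits: &RateLimits{MaxDispatchesPerSecond: 10}}
	return svc.Projects.Locations.Queues.Patch(name, q).
		UpdateMask("rateLimits.maxDispatchesPerSecond").
		Context(ctx).Do()
}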
// method id "cloudtasks.projects.locations.queues.pause":
type ProjectsLocationsQueuesPauseCall struct {
s *Service
name string
pausequeuerequest *PauseQueueRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Pause: Pauses the queue. If a queue is paused then the system will
// stop dispatching tasks until the queue is resumed via ResumeQueue.
// Tasks can still be added when the queue is paused. A queue is paused
// if its state is PAUSED.
//
// - name: The queue name. For example:
//   `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`.
func (r *ProjectsLocationsQueuesService) Pause(name string, pausequeuerequest *PauseQueueRequest) *ProjectsLocationsQueuesPauseCall {
c := &ProjectsLocationsQueuesPauseCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.pausequeuerequest = pausequeuerequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesPauseCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesPauseCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesPauseCall) Context(ctx context.Context) *ProjectsLocationsQueuesPauseCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesPauseCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesPauseCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.pausequeuerequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:pause")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.pause" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesPauseCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Pauses the queue. If a queue is paused then the system will stop dispatching tasks until the queue is resumed via ResumeQueue. Tasks can still be added when the queue is paused. A queue is paused if its state is PAUSED.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:pause",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.pause",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:pause",
// "request": {
// "$ref": "PauseQueueRequest"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
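
// Illustrative sketch: pausing a queue. PauseQueueRequest carries no
// fields, so an empty value is passed; the queue name is a placeholder.
// Tasks can still be enqueued while the queue is paused.
func exampleQueuesPause(ctx context.Context) (*Queue, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	name := "projects/my-project/locations/us-central1/queues/my-queue"
	return svc.Projects.Locations.Queues.Pause(name, &PauseQueueRequest{}).Context(ctx).Do()
}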
// method id "cloudtasks.projects.locations.queues.purge":
type ProjectsLocationsQueuesPurgeCall struct {
s *Service
name string
purgequeuerequest *PurgeQueueRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Purge: Purges a queue by deleting all of its tasks. All tasks created
// before this method is called are permanently deleted. Purge
// operations can take up to one minute to take effect. Tasks might be
// dispatched before the purge takes effect. A purge is irreversible.
//
// - name: The queue name. For example:
//   `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`.
func (r *ProjectsLocationsQueuesService) Purge(name string, purgequeuerequest *PurgeQueueRequest) *ProjectsLocationsQueuesPurgeCall {
c := &ProjectsLocationsQueuesPurgeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.purgequeuerequest = purgequeuerequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesPurgeCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesPurgeCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesPurgeCall) Context(ctx context.Context) *ProjectsLocationsQueuesPurgeCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesPurgeCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesPurgeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.purgequeuerequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:purge")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.purge" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesPurgeCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Purges a queue by deleting all of its tasks. All tasks created before this method is called are permanently deleted. Purge operations can take up to one minute to take effect. Tasks might be dispatched before the purge takes effect. A purge is irreversible.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:purge",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.purge",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:purge",
// "request": {
// "$ref": "PurgeQueueRequest"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
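
// Illustrative sketch: purging a queue. As the comment above warns, a
// purge is irreversible and can take up to a minute to take effect; the
// queue name is a placeholder.
func exampleQueuesPurge(ctx context.Context) (*Queue, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	name := "projects/my-project/locations/us-central1/queues/my-queue"
	return svc.Projects.Locations.Queues.Purge(name, &PurgeQueueRequest{}).Context(ctx).Do()
}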
// method id "cloudtasks.projects.locations.queues.resume":
type ProjectsLocationsQueuesResumeCall struct {
s *Service
name string
resumequeuerequest *ResumeQueueRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Resume: Resumes a queue. This method resumes a queue after it has been
// PAUSED or DISABLED. The state of a queue is stored in the queue's
// state; after calling this method it will be set to RUNNING. WARNING:
// Resuming many high-QPS queues at the same time can lead to target
// overloading. If you are resuming high-QPS queues, follow the 500/50/5
// pattern described in Managing Cloud Tasks Scaling Risks
// (https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).
//
// - name: The queue name. For example:
//   `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`.
func (r *ProjectsLocationsQueuesService) Resume(name string, resumequeuerequest *ResumeQueueRequest) *ProjectsLocationsQueuesResumeCall {
c := &ProjectsLocationsQueuesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.resumequeuerequest = resumequeuerequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesResumeCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesResumeCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesResumeCall) Context(ctx context.Context) *ProjectsLocationsQueuesResumeCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesResumeCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesResumeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.resumequeuerequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:resume")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.resume" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesResumeCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Resume a queue. This method resumes a queue after it has been PAUSED or DISABLED. The state of a queue is stored in the queue's state; after calling this method it will be set to RUNNING. WARNING: Resuming many high-QPS queues at the same time can lead to target overloading. If you are resuming high-QPS queues, follow the 500/50/5 pattern described in [Managing Cloud Tasks Scaling Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:resume",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.resume",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:resume",
// "request": {
// "$ref": "ResumeQueueRequest"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
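
// Illustrative sketch: resuming a paused queue. Per the warning above,
// resume many high-QPS queues gradually (the 500/50/5 pattern); the
// queue name is a placeholder.
func exampleQueuesResume(ctx context.Context) (*Queue, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	name := "projects/my-project/locations/us-central1/queues/my-queue"
	return svc.Projects.Locations.Queues.Resume(name, &ResumeQueueRequest{}).Context(ctx).Do()
}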
// method id "cloudtasks.projects.locations.queues.setIamPolicy":
type ProjectsLocationsQueuesSetIamPolicyCall struct {
s *Service
resource string
setiampolicyrequest *SetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// SetIamPolicy: Sets the access control policy for a Queue. Replaces
// any existing policy. Note: The Cloud Console does not check
// queue-level IAM permissions yet. Project-level permissions are
// required to use the Cloud Console. Authorization requires the
// following Google IAM (https://cloud.google.com/iam) permission on the
// specified resource parent: * `cloudtasks.queues.setIamPolicy`
//
// - resource: REQUIRED: The resource for which the policy is being
// specified. See the operation documentation for the appropriate
// value for this field.
func (r *ProjectsLocationsQueuesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsQueuesSetIamPolicyCall {
c := &ProjectsLocationsQueuesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.setiampolicyrequest = setiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesSetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsQueuesSetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.setIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Sets the access control policy for a Queue. Replaces any existing policy. Note: The Cloud Console does not check queue-level IAM permissions yet. Project-level permissions are required to use the Cloud Console. Authorization requires the following [Google IAM](https://cloud.google.com/iam) permission on the specified resource parent: * `cloudtasks.queues.setIamPolicy`",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:setIamPolicy",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.setIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+resource}:setIamPolicy",
// "request": {
// "$ref": "SetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
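
// Illustrative sketch: granting the enqueuer role on a queue. Because
// SetIamPolicy replaces the whole policy, the safer pattern is a
// read-modify-write against the current policy (which carries its Etag
// back for optimistic concurrency); role and member strings are
// placeholders.
func exampleQueuesSetIamPolicy(ctx context.Context) (*Policy, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	resource := "projects/my-project/locations/us-central1/queues/my-queue"
	cur, err := svc.Projects.Locations.Queues.GetIamPolicy(resource, &GetIamPolicyRequest{}).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	cur.Bindings = append(cur.Bindings, &Binding{
		Role:    "roles/cloudtasks.enqueuer",
		Members: []string{"user:jane@example.com"},
	})
	return svc.Projects.Locations.Queues.SetIamPolicy(resource, &SetIamPolicyRequest{Policy: cur}).Context(ctx).Do()
}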
// method id "cloudtasks.projects.locations.queues.testIamPermissions":
type ProjectsLocationsQueuesTestIamPermissionsCall struct {
s *Service
resource string
testiampermissionsrequest *TestIamPermissionsRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// TestIamPermissions: Returns permissions that a caller has on a Queue.
// If the resource does not exist, this will return an empty set of
// permissions, not a NOT_FOUND error. Note: This operation is designed
// to be used for building permission-aware UIs and command-line tools,
// not for authorization checking. This operation may "fail open"
// without warning.
//
// - resource: REQUIRED: The resource for which the policy detail is
// being requested. See the operation documentation for the
// appropriate value for this field.
func (r *ProjectsLocationsQueuesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsQueuesTestIamPermissionsCall {
c := &ProjectsLocationsQueuesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.testiampermissionsrequest = testiampermissionsrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTestIamPermissionsCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsQueuesTestIamPermissionsCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.testIamPermissions" call.
// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TestIamPermissionsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns permissions that a caller has on a Queue. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:testIamPermissions",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.testIamPermissions",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+resource}:testIamPermissions",
// "request": {
// "$ref": "TestIamPermissionsRequest"
// },
// "response": {
// "$ref": "TestIamPermissionsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
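
// Illustrative sketch: probing which of a set of permissions the caller
// holds on a queue. Per the comment above, this is meant for building
// UIs and tooling, not for authorization checks; the permission strings
// are placeholders.
func exampleQueuesTestIamPermissions(ctx context.Context) ([]string, error) {
	svc, err := NewService(ctx)
	if err != nil {
		return nil, err
	}
	resource := "projects/my-project/locations/us-central1/queues/my-queue"
	req := &TestIamPermissionsRequest{Permissions: []string{
		"cloudtasks.tasks.create",
		"cloudtasks.tasks.delete",
	}}
	resp, err := svc.Projects.Locations.Queues.TestIamPermissions(resource, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // the subset the caller actually holds
}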
// method id "cloudtasks.projects.locations.queues.tasks.create":
type ProjectsLocationsQueuesTasksCreateCall struct {
s *Service
parent string
createtaskrequest *CreateTaskRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a task and adds it to a queue. Tasks cannot be
// updated after creation; there is no UpdateTask command. The maximum
// task size is 100KB.
//
// - parent: The queue name. For example:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` The
// queue must already exist.
func (r *ProjectsLocationsQueuesTasksService) Create(parent string, createtaskrequest *CreateTaskRequest) *ProjectsLocationsQueuesTasksCreateCall {
c := &ProjectsLocationsQueuesTasksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.createtaskrequest = createtaskrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksCreateCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtaskrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/tasks")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.create" call.
// Exactly one of *Task or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Task.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsLocationsQueuesTasksCreateCall) Do(opts ...googleapi.CallOption) (*Task, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Task{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a task and adds it to a queue. Tasks cannot be updated after creation; there is no UpdateTask command. * The maximum task size is 100KB.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.tasks.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` The queue must already exist.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/tasks",
// "request": {
// "$ref": "CreateTaskRequest"
// },
// "response": {
// "$ref": "Task"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
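// Illustrative usage sketch (service construction elided; the parent path and
// request payload below are hypothetical):
//
//	req := &CreateTaskRequest{Task: &Task{}}
//	task, err := cloudtasksService.Projects.Locations.Queues.Tasks.
//		Create("projects/my-project/locations/us-central1/queues/my-queue", req).
//		Context(context.Background()).Do()
//	if err != nil {
//		// handle the error
//	}
//	_ = task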
// method id "cloudtasks.projects.locations.queues.tasks.delete":
type ProjectsLocationsQueuesTasksDeleteCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a task. A task can be deleted if it is scheduled or
// dispatched. A task cannot be deleted if it has executed successfully
// or permanently failed.
//
// - name: The task name. For example:
//   `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`.
func (r *ProjectsLocationsQueuesTasksService) Delete(name string) *ProjectsLocationsQueuesTasksDeleteCall {
c := &ProjectsLocationsQueuesTasksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a task. A task can be deleted if it is scheduled or dispatched. A task cannot be deleted if it has executed successfully or permanently failed.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{tasksId}",
// "httpMethod": "DELETE",
// "id": "cloudtasks.projects.locations.queues.tasks.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The task name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+/tasks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// method id "cloudtasks.projects.locations.queues.tasks.get":
type ProjectsLocationsQueuesTasksGetCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a task.
//
// - name: The task name. For example:
//   `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`.
func (r *ProjectsLocationsQueuesTasksService) Get(name string) *ProjectsLocationsQueuesTasksGetCall {
c := &ProjectsLocationsQueuesTasksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// ResponseView sets the optional parameter "responseView": The
// response_view specifies which subset of the Task will be returned. By
// default response_view is BASIC; not all information is retrieved by
// default because some data, such as payloads, might be desirable to
// return only when needed because of its large size or because of the
// sensitivity of data that it contains. Authorization for FULL requires
// `cloudtasks.tasks.fullView` Google IAM
// (https://cloud.google.com/iam/) permission on the Task resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
func (c *ProjectsLocationsQueuesTasksGetCall) ResponseView(responseView string) *ProjectsLocationsQueuesTasksGetCall {
c.urlParams_.Set("responseView", responseView)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesTasksGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesTasksGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksGetCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.get" call.
// Exactly one of *Task or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Task.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsLocationsQueuesTasksGetCall) Do(opts ...googleapi.CallOption) (*Task, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Task{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a task.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{tasksId}",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.tasks.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The task name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+/tasks/[^/]+$",
// "required": true,
// "type": "string"
// },
// "responseView": {
// "description": "The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource.",
// "enum": [
// "VIEW_UNSPECIFIED",
// "BASIC",
// "FULL"
// ],
// "enumDescriptions": [
// "Unspecified. Defaults to BASIC.",
// "The basic view omits fields which can be large or can contain sensitive data. This view does not include the body in AppEngineHttpRequest. Bodies are desirable to return only when needed, because they can be large and because of the sensitivity of the data that you choose to store in it.",
// "All information is returned. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Queue resource."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Task"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// method id "cloudtasks.projects.locations.queues.tasks.list":
type ProjectsLocationsQueuesTasksListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the tasks in a queue. By default, only the BASIC view is
// retrieved due to performance considerations; response_view controls
// the subset of information which is returned. The tasks may be
// returned in any order. The ordering may change at any time.
//
// - parent: The queue name. For example:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`.
func (r *ProjectsLocationsQueuesTasksService) List(parent string) *ProjectsLocationsQueuesTasksListCall {
c := &ProjectsLocationsQueuesTasksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": Maximum page size.
// Fewer tasks than requested might be returned, even if more tasks
// exist; use next_page_token in the response to determine if more tasks
// exist. The maximum page size is 1000. If unspecified, the page size
// will be the maximum.
func (c *ProjectsLocationsQueuesTasksListCall) PageSize(pageSize int64) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": A token
// identifying the page of results to return. To request the first page
// results, page_token must be empty. To request the next page of
// results, page_token must be the value of next_page_token returned
// from the previous call to ListTasks method. The page token is valid
// for only 2 hours.
func (c *ProjectsLocationsQueuesTasksListCall) PageToken(pageToken string) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// ResponseView sets the optional parameter "responseView": The
// response_view specifies which subset of the Task will be returned. By
// default response_view is BASIC; not all information is retrieved by
// default because some data, such as payloads, might be desirable to
// return only when needed because of its large size or because of the
// sensitivity of data that it contains. Authorization for FULL requires
// `cloudtasks.tasks.fullView` Google IAM
// (https://cloud.google.com/iam/) permission on the Task resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
func (c *ProjectsLocationsQueuesTasksListCall) ResponseView(responseView string) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("responseView", responseView)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksListCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesTasksListCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesTasksListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksListCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/tasks")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.list" call.
// Exactly one of *ListTasksResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListTasksResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsQueuesTasksListCall) Do(opts ...googleapi.CallOption) (*ListTasksResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListTasksResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the tasks in a queue. By default, only the BASIC view is retrieved due to performance considerations; response_view controls the subset of information which is returned. The tasks may be returned in any order. The ordering may change at any time.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.tasks.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Maximum page size. Fewer tasks than requested might be returned, even if more tasks exist; use next_page_token in the response to determine if more tasks exist. The maximum page size is 1000. If unspecified, the page size will be the maximum.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "A token identifying the page of results to return. To request the first page results, page_token must be empty. To request the next page of results, page_token must be the value of next_page_token returned from the previous call to ListTasks method. The page token is valid for only 2 hours.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// },
// "responseView": {
// "description": "The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource.",
// "enum": [
// "VIEW_UNSPECIFIED",
// "BASIC",
// "FULL"
// ],
// "enumDescriptions": [
// "Unspecified. Defaults to BASIC.",
// "The basic view omits fields which can be large or can contain sensitive data. This view does not include the body in AppEngineHttpRequest. Bodies are desirable to return only when needed, because they can be large and because of the sensitivity of the data that you choose to store in it.",
// "All information is returned. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Queue resource."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+parent}/tasks",
// "response": {
// "$ref": "ListTasksResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsQueuesTasksListCall) Pages(ctx context.Context, f func(*ListTasksResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
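// Illustrative pagination sketch (parent path is hypothetical):
//
//	call := cloudtasksService.Projects.Locations.Queues.Tasks.
//		List("projects/my-project/locations/us-central1/queues/my-queue")
//	err := call.Pages(ctx, func(page *ListTasksResponse) error {
//		for _, task := range page.Tasks {
//			fmt.Println(task.Name)
//		}
//		return nil // returning an error here would halt iteration
//	})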
// method id "cloudtasks.projects.locations.queues.tasks.run":
type ProjectsLocationsQueuesTasksRunCall struct {
s *Service
name string
runtaskrequest *RunTaskRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Run: Forces a task to run now. When this method is called, Cloud
// Tasks will dispatch the task, even if the task is already running,
// the queue has reached its RateLimits or is PAUSED. This command is
// meant to be used for manual debugging. For example, RunTask can be
// used to retry a failed task after a fix has been made or to manually
// force a task to be dispatched now. The dispatched task is returned.
// That is, the task that is returned contains the status after the task
// is dispatched but before the task is received by its target. If Cloud
// Tasks receives a successful response from the task's target, then the
// task will be deleted; otherwise the task's schedule_time will be
// reset to the time that RunTask was called plus the retry delay
// specified in the queue's RetryConfig. RunTask returns NOT_FOUND when
// it is called on a task that has already succeeded or permanently
// failed.
//
// - name: The task name. For example:
//   `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`.
func (r *ProjectsLocationsQueuesTasksService) Run(name string, runtaskrequest *RunTaskRequest) *ProjectsLocationsQueuesTasksRunCall {
c := &ProjectsLocationsQueuesTasksRunCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.runtaskrequest = runtaskrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksRunCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksRunCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksRunCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksRunCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksRunCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksRunCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20220111")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.runtaskrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:run")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.run" call.
// Exactly one of *Task or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Task.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsLocationsQueuesTasksRunCall) Do(opts ...googleapi.CallOption) (*Task, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Task{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Forces a task to run now. When this method is called, Cloud Tasks will dispatch the task, even if the task is already running, the queue has reached its RateLimits or is PAUSED. This command is meant to be used for manual debugging. For example, RunTask can be used to retry a failed task after a fix has been made or to manually force a task to be dispatched now. The dispatched task is returned. That is, the task that is returned contains the status after the task is dispatched but before the task is received by its target. If Cloud Tasks receives a successful response from the task's target, then the task will be deleted; otherwise the task's schedule_time will be reset to the time that RunTask was called plus the retry delay specified in the queue's RetryConfig. RunTask returns NOT_FOUND when it is called on a task that has already succeeded or permanently failed.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{tasksId}:run",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.tasks.run",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The task name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+/tasks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:run",
// "request": {
// "$ref": "RunTaskRequest"
// },
// "response": {
// "$ref": "Task"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}

// NewProjectsLocationsService returns a new ProjectsLocationsService.
func NewProjectsLocationsService(s *Service) *ProjectsLocationsService {
	rs := &ProjectsLocationsService{s: s}
	rs.Queues = NewProjectsLocationsQueuesService(s)
	return rs
}

app.py

# import necessary libraries
from flask import Flask, render_template, jsonify, redirect
from flask_pymongo import PyMongo
import scrape_mars
# create instance of Flask app
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# create route that renders index.html template
@app.route("/")
def index():
    mars_data = mongo.db.marsdata.find_one()
return render_template("index.html", mars_data=mars_data)
@app.route("/scrape")
def scraper():
    mongo.db.marsdata.drop()
results = scrape_mars.scrape()
mongo.db.marsdata.insert_one(results)
return redirect("/", code=302)
if __name__ == "__main__":
    app.run(debug=True)
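# Usage sketch: with MongoDB running locally, `python app.py` serves the index
# on Flask's default port 5000; visiting /scrape re-scrapes and redirects home.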

EditItem.js

import React, {Component} from 'react';
import axios from 'axios';
import {Link} from 'react-router';
class EditItem extends Component {
constructor(props) {
super(props);
this.state = {name: '', price: ''};
        // Bind the handlers so `this` inside them refers to this EditItem component
this.handleChange1 = this.handleChange1.bind(this);
this.handleChange2 = this.handleChange2.bind(this);
this.handleSubmit = this.handleSubmit.bind(this);
}
componentDidMount() {
        axios.get(`http://localhost:8000/api/items/${this.props.params.id}/edit`)
            .then(response => {
                this.setState({name: response.data.name, price: response.data.price});
            })
            .catch(function (error) {
                console.log(error);
})
}
handleChange1(e) {
this.setState({
name: e.target.value
})
}
handleChange2(e) {
this.setState({
price: e.target.value
})
}
handleSubmit(event) {
event.preventDefault();
const products = {
name: this.state.name,
price: this.state.price
}
let uri = 'http://localhost:8000/api/items/' + this.props.params.id;
axios.patch(uri, products).then((response) => {
this.props.history.push('/display-item');
});
}
render() {
return (
<div>
<h1>Update Item</h1>
<div className="row">
<div className="col-md-10"></div>
<div className="col-md-2">
<Link to="/display-item" className="btn btn-success">Return to Items</Link>
</div>
</div>
<form onSubmit={this.handleSubmit}>
<div className="form-group">
<label>Item Name</label>
<input type="text"
className="form-control"
value={this.state.name}
onChange={this.handleChange1}/>
</div>
<div className="form-group">
<label name="product_price">Item Price</label>
<input type="text" className="form-control"
value={this.state.price}
onChange={this.handleChange2}/>
</div>
<div className="form-group">
<button className="btn btn-primary">Update</button>
</div>
</form>
</div>
)
}
}
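// Usage sketch (assumes react-router v3-style routing, which supplies the
// `params` and `history` props used above; the path is illustrative):
//   <Route path="/edit-item/:id" component={EditItem} />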
export default EditItem;

commands.rs

// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::server::Facade;
use crate::update::{facade::UpdateFacade, types::UpdateMethod};
use anyhow::Error;
use async_trait::async_trait;
use serde_json::{to_value, Value};
#[async_trait(?Send)]
impl Facade for UpdateFacade {
    async fn handle_request(&self, method: String, args: Value) -> Result<Value, Error> {
Ok(match method.parse()? {
UpdateMethod::CheckNow => to_value(self.check_now(args).await?),
UpdateMethod::GetCurrentChannel => to_value(self.get_current_channel().await?),
UpdateMethod::GetTargetChannel => to_value(self.get_target_channel().await?),
UpdateMethod::SetTargetChannel => to_value(self.set_target_channel(args).await?),
UpdateMethod::GetChannelList => to_value(self.get_channel_list().await?),
}?)
}
}
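// Illustrative dispatch sketch (facade construction elided; the exact method
// string accepted by `method.parse()` depends on UpdateMethod's FromStr impl,
// which is an assumption here):
//
//     let result = facade
//         .handle_request("GetCurrentChannel".to_string(), serde_json::Value::Null)
//         .await?;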

default_tags.rs

//! Plugin which applies `DefaultTags` to `dc::File`.
use compose_yml::v2 as dc;
use std::marker::PhantomData;
use errors::*;
use plugins;
use plugins::{Operation, PluginNew, PluginTransform};
use project::Project;
/// Applies `DefaultTags` to `dc::File`.
#[derive(Debug)]
#[allow(missing_copy_implementations)]
pub struct Plugin {
/// Placeholder field for future hidden fields, to keep this from being
/// directly constructable.
_placeholder: PhantomData<()>,
}
impl plugins::Plugin for Plugin {
fn name(&self) -> &'static str {
Self::plugin_name()
}
}
impl PluginNew for Plugin {
fn plugin_name() -> &'static str {
"default_tags"
}
fn new(_project: &Project) -> Result<Self> {
Ok(Plugin {
_placeholder: PhantomData,
})
}
}
impl PluginTransform for Plugin {
fn transform(
&self,
_op: Operation,
ctx: &plugins::Context,
file: &mut dc::File,
) -> Result<()> {
// Do we have any default tags specified for this project?
if let Some(tags) = ctx.project.default_tags() {
// Apply the tags to each service.
for service in &mut file.services.values_mut() {
                // Clone `service.image` to make life easy for the borrow
                // checker, so that it remains our friend.
if let Some(image) = service.image.to_owned() {
let default = tags.default_for(image.value()?);
service.image = Some(dc::value(default));
}
}
}
Ok(())
}
}
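// Illustrative effect of the transform (tag value is hypothetical): if the
// project's DefaultTags resolve "example.com/app" to "example.com/app:v1.2",
// a service declared as `image: "example.com/app"` comes out of `transform`
// as `image: "example.com/app:v1.2"`.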

membership_inference_attack.py

# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Code that runs membership inference attacks based on the model outputs.
This file belongs to the new API for membership inference attacks. This file
will be renamed to membership_inference_attack.py after the old API is removed.
"""
from typing import Iterable
import numpy as np
from sklearn import metrics
from tensorflow_privacy.privacy.membership_inference_attack import models
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import AttackInputData
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import AttackResults
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import \
PrivacyReportMetadata
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import RocCurve
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import SingleAttackResult
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import SingleSliceSpec
from tensorflow_privacy.privacy.membership_inference_attack.data_structures import SlicingSpec
from tensorflow_privacy.privacy.membership_inference_attack.dataset_slicing import get_single_slice_specs
from tensorflow_privacy.privacy.membership_inference_attack.dataset_slicing import get_slice
def _get_slice_spec(data: AttackInputData) -> SingleSliceSpec:
if hasattr(data, 'slice_spec'):
return data.slice_spec
return SingleSliceSpec()
def _run_trained_attack(attack_input: AttackInputData,
attack_type: AttackType,
balance_attacker_training: bool = True):
"""Classification attack done by ML models."""
attacker = None
if attack_type == AttackType.LOGISTIC_REGRESSION:
attacker = models.LogisticRegressionAttacker()
elif attack_type == AttackType.MULTI_LAYERED_PERCEPTRON:
attacker = models.MultilayerPerceptronAttacker()
elif attack_type == AttackType.RANDOM_FOREST:
attacker = models.RandomForestAttacker()
elif attack_type == AttackType.K_NEAREST_NEIGHBORS:
attacker = models.KNearestNeighborsAttacker()
else:
raise NotImplementedError('Attack type %s not implemented yet.' %
attack_type)
prepared_attacker_data = models.create_attacker_data(
attack_input, balance=balance_attacker_training)
attacker.train_model(prepared_attacker_data.features_train,
prepared_attacker_data.is_training_labels_train)
# Run the attacker on (permuted) test examples.
predictions_test = attacker.predict(prepared_attacker_data.features_test)
  # Generate ROC curves with predictions.
  fpr, tpr, thresholds = metrics.roc_curve(
      prepared_attacker_data.is_training_labels_test, predictions_test)
roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)
return SingleAttackResult(
slice_spec=_get_slice_spec(attack_input),
attack_type=attack_type,
roc_curve=roc_curve)
def _run_threshold_attack(attack_input: AttackInputData):
fpr, tpr, thresholds = metrics.roc_curve(
np.concatenate((np.zeros(attack_input.get_train_size()),
np.ones(attack_input.get_test_size()))),
np.concatenate(
(attack_input.get_loss_train(), attack_input.get_loss_test())))
roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)
return SingleAttackResult(
slice_spec=_get_slice_spec(attack_input),
attack_type=AttackType.THRESHOLD_ATTACK,
roc_curve=roc_curve)
def _run_threshold_entropy_attack(attack_input: AttackInputData):
fpr, tpr, thresholds = metrics.roc_curve(
np.concatenate((np.zeros(attack_input.get_train_size()),
np.ones(attack_input.get_test_size()))),
np.concatenate(
(attack_input.get_entropy_train(), attack_input.get_entropy_test())))
roc_curve = RocCurve(tpr=tpr, fpr=fpr, thresholds=thresholds)
return SingleAttackResult(
slice_spec=_get_slice_spec(attack_input),
attack_type=AttackType.THRESHOLD_ENTROPY_ATTACK,
roc_curve=roc_curve)
def _run_attack(attack_input: AttackInputData,
attack_type: AttackType,
balance_attacker_training: bool = True):
attack_input.validate()
if attack_type.is_trained_attack:
return _run_trained_attack(attack_input, attack_type,
balance_attacker_training)
if attack_type == AttackType.THRESHOLD_ENTROPY_ATTACK:
return _run_threshold_entropy_attack(attack_input)
return _run_threshold_attack(attack_input)
def run_attacks(attack_input: AttackInputData,
slicing_spec: SlicingSpec = None,
attack_types: Iterable[AttackType] = (
AttackType.THRESHOLD_ATTACK,),
privacy_report_metadata: PrivacyReportMetadata = None,
balance_attacker_training: bool = True) -> AttackResults:
"""Runs membership inference attacks on a classification model.
It runs attacks specified by attack_types on each attack_input slice which is
specified by slicing_spec.
Args:
attack_input: input data for running an attack
slicing_spec: specifies attack_input slices to run attack on
attack_types: attacks to run
privacy_report_metadata: the metadata of the model under attack.
balance_attacker_training: Whether the training and test sets for the
membership inference attacker should have a balanced (roughly equal)
number of samples from the training and test sets used to develop
the model under attack.
Returns:
the attack result.
"""
attack_input.validate()
attack_results = []
if slicing_spec is None:
slicing_spec = SlicingSpec(entire_dataset=True)
input_slice_specs = get_single_slice_specs(slicing_spec,
attack_input.num_classes)
for single_slice_spec in input_slice_specs:
attack_input_slice = get_slice(attack_input, single_slice_spec)
for attack_type in attack_types:
attack_results.append(
_run_attack(attack_input_slice, attack_type,
balance_attacker_training))
privacy_report_metadata = _compute_missing_privacy_report_metadata(
privacy_report_metadata, attack_input)
return AttackResults(
single_attack_results=attack_results,
privacy_report_metadata=privacy_report_metadata)
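# Illustrative usage sketch (the array names are hypothetical; AttackInputData
# also accepts losses or entropies in place of logits):
#
#   attack_input = AttackInputData(
#       logits_train=train_logits, logits_test=test_logits,
#       labels_train=train_labels, labels_test=test_labels)
#   results = run_attacks(
#       attack_input,
#       attack_types=(AttackType.THRESHOLD_ATTACK,
#                     AttackType.LOGISTIC_REGRESSION))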
def _compute_missing_privacy_report_metadata(
metadata: PrivacyReportMetadata,
attack_input: AttackInputData) -> PrivacyReportMetadata:
"""Populates metadata fields if they are missing."""
if metadata is None:
metadata = PrivacyReportMetadata()
if metadata.accuracy_train is None:
metadata.accuracy_train = _get_accuracy(attack_input.logits_train,
attack_input.labels_train)
if metadata.accuracy_test is None:
metadata.accuracy_test = _get_accuracy(attack_input.logits_test,
attack_input.labels_test)
if metadata.loss_train is None:
metadata.loss_train = np.average(attack_input.get_loss_train())
if metadata.loss_test is None:
metadata.loss_test = np.average(attack_input.get_loss_test())
return metadata
def _get_accuracy(logits, labels):
"""Computes the accuracy if it is missing."""
if logits is None or labels is None:
return None
  return metrics.accuracy_score(labels, np.argmax(logits, axis=1))

py2.py

import sys
from .calc import *
from .colls import *
from .tree import *
from .decorators import *
from .funcolls import *
from .funcs import *
from .seqs import *
from .types import *
from .strings import *
from .flow import *
from .objects import *
from .debug import *
from .primitives import *
# Setup __all__
modules = ('calc', 'colls', 'tree', 'decorators', 'funcolls', 'funcs', 'seqs', 'types',
'strings', 'flow', 'objects', 'debug', 'primitives')
__all__ = cat(sys.modules['funcy.' + m].__all__ for m in modules)
# Python 2 style zip() for Python 3
from .cross import PY3
if PY3:
_zip = zip
    def zip(*seqs):
"""List zip() version."""
return list(_zip(*seqs))
__all__ += ['zip'] # HACK: using this instead of .append() to not trigger PyCharm
else:
    zip = zip
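# Example: with this shim, funcy exposes an eager zip on Python 3, matching
# Python 2 semantics:
#     zip([1, 2], [3, 4])  # -> [(1, 3), (2, 4)] (a list, not an iterator)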

YoutubeDownloader.py

import sys, re, os, selenium, time, argparse
from time import sleep
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from urllib.request import urlopen, urlretrieve
class YoutubeDownloader(object):
def __init__(self):
self.driver = webdriver.Chrome()
def download_video(self, directory, query):
driver = self.driver
download_link = "http://www.ssyoutube.com/watch?v=" + query.split("?v=")[1]
driver.get(download_link)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], directory + "/" + name + ".mp4")
break
driver.close()
def parse_links(self, query):
driver = self.driver
driver.get(query)
sleep(10)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
title = soup.select('yt-formatted-string.title > a:nth-child(1)')[0].text
links = list()
for a in soup.find_all('a'):
if "index=" in a['href']:
links.append(a['href'].split('v=')[-1])
return title, links
def download_playlist(self, links, list_dir, number):
driver = self.driver
num = 0
for link in links:
if(num == number):
break
num = num + 1
download_link = "http://www.ssyoutube.com/watch?v=" + link
driver.get(download_link)
time.sleep(15)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
for a in soup.find_all('a'):
if "videoplayback" in a['href']:
name = a['href'].split('=')[-1].replace("+", " ").replace("%28", "(").replace("%29", ")")
urlretrieve(a['href'], list_dir + "/" + name + ".mp4")
break
driver.close()
    def create_base_directory(self, directory):
        direct = os.path.dirname(directory)
        if not os.path.exists(direct):
            os.makedirs(direct)
        return direct
    def create_list_directory(self, directory, title):
        direct = os.path.dirname(os.path.join(directory, title))
        if not os.path.exists(direct):
            os.makedirs(direct)
        return direct
def download(self, query, crawl_type, number, directory):
direct = self.create_base_directory(directory)
if(crawl_type == 'video'):
self.download_video(direct, query)
elif(crawl_type == 'playlist'):
title, links = self.parse_links(query)
list_dir = self.create_list_directory(direct, title)
self.download_playlist(links, list_dir, number)
def main():
parser = argparse.ArgumentParser(description='Youtube Downloader')
parser.add_argument('-q', '--query', type=str, help='Link of video or playlist')
parser.add_argument('-t', '--crawl_type', type=str, default='video', help="Options: 'video' | 'playlist'")
parser.add_argument('-n', '--number', type=int, default=0, help='Number of videos to download from playlist: integer, -1 to download all')
parser.add_argument('-d', '--directory', type=str, default='./Videos/', help='Directory to save results')
# parser.add_argument('-l', '--headless', action='store_true', help='If set, script will be run headless')
args = parser.parse_args()
downloader = YoutubeDownloader()
downloader.download(query=args.query,
crawl_type=args.crawl_type,
number=args.number,
directory=args.directory)
if __name__ == "__main__":
    main()
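# Example invocation (URL and directory are illustrative):
#     python YoutubeDownloader.py -q "https://www.youtube.com/watch?v=VIDEO_ID" \
#         -t video -d ./Videos/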

content.py

import os
import re
import logging
from functools import reduce
from itertools import zip_longest
from collections.abc import Iterable
import html
logger = logging.getLogger(__name__)
RE_SPLIT_OR = "(?<!\\\)\|"
class BlockContent(list):
def __init__(self, roam_objects=[]):
"""
Args:
roam_objects (List of BlockContentItem)
"""
if type(roam_objects) not in [list, BlockContent]:
roam_objects = [roam_objects]
for obj in roam_objects:
if type(obj) in [str, int, float]:
obj = String(str(obj))
elif isinstance(obj, BlockContentItem):
pass
else:
raise ValueError(f"roam_objects can't contain {type(obj)} type objects")
self.append(obj)
@classmethod
def find_and_replace(cls, obj, skip=[], *args, **kwargs):
roam_object_types = [
BlockQuote,
CodeBlock,
CodeInline,
Cloze,
Image,
Alias,
Checkbox,
Embed,
View,
Button,
PageTag,
PageRef,
BlockRef,
Attribute,
#Url, #TODO: don't have a good regex for this right now
]
roam_object_types = [o for o in roam_object_types if o not in skip]
roam_objects = BlockContent(obj)
for rm_obj_type in roam_object_types:
roam_objects = rm_obj_type.find_and_replace(roam_objects, *args, **kwargs)
return cls(roam_objects)
@classmethod
def from_string(cls, string, *args, **kwargs):
return cls.find_and_replace(string, *args, **kwargs)
def get_tags(self):
tags = []
for obj in self:
tags += obj.get_tags()
return list(set(tags))
def to_string(self):
return "".join([o.to_string() for o in self])
def to_html(self, *args, **kwargs):
# TODO: implement filters
res = "".join([o.to_html(*args, **kwargs) for o in self])
res = self._all_emphasis_to_html(res)
return res
def is_single_pageref(self):
return len(self)==1 and type(self[0])==PageRef
def get_strings(self):
return [o for o in self if type(o)==String]
@staticmethod
def _get_emphasis_locs(string, emphasis):
emphasis_locs = []
emphasis_start = emphasis_end = None
for i,c in enumerate(string):
if emphasis_start is None and string[i:i+len(emphasis)] == emphasis:
emphasis_start = i
continue
if emphasis_end is None and string[i:i+len(emphasis)] == emphasis:
emphasis_end = i + (len(emphasis)-1)
emphasis_locs.append((emphasis_start, emphasis_end))
emphasis_start = emphasis_end = None
return emphasis_locs
def _emphasis_to_html(self, string, emphasis, html_left, html_right):
emphasis_locs = self._get_emphasis_locs(string, emphasis)
diff = 0
for (i, j) in emphasis_locs:
i, j = i + diff, j + diff
string = string[:i] + html_left + string[i+len(emphasis):j-len(emphasis)+1] + html_right + string[j+1:]
diff += len(html_left+html_right) - len(emphasis+emphasis)
return string
def _all_emphasis_to_html(self, string):
string = self._emphasis_to_html(string, emphasis="`", html_left="<code>", html_right="</code>")
string = self._emphasis_to_html(string, emphasis="**", html_left="<b>", html_right="</b>")
string = self._emphasis_to_html(string, emphasis="__", html_left="<em>", html_right="</em>")
string = self._emphasis_to_html(string, emphasis="^^", html_left='<span class="roam-highlight">', html_right='</span>')
return string
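    # e.g. _all_emphasis_to_html('**bold** and ^^lit^^') returns
    # '<b>bold</b> and <span class="roam-highlight">lit</span>'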
def __repr__(self):
return "<%s(%s)>" % (
self.__class__.__name__, repr(list(self)))
def get_contents(self, recursive=False):
if not recursive:
return list(self)
else:
items = []
for item in self:
items += [item]
items += item.get_contents()
return items
def merge_adjacent_strings(self):
i = 0
while i + 1 < len(self):
if type(self[i]) == String and type(self[i+1]) == String:
self[i].string += self[i+1].string
del self[i+1]
else:
i += 1
class BlockContentItem:
@classmethod
def from_string(cls, string, validate=True):
if validate and not cls.validate_string(string):
raise ValueError(f"Invalid string '{string}' for {cls.__name__}")
@classmethod
def validate_string(cls, string):
pat = cls.create_pattern(string)
pat = "|".join([f"^{p}$" for p in re.split(RE_SPLIT_OR, pat)])
if re.match(re.compile(pat), string):
return True
return False
def to_string(self):
raise NotImplementedError
def to_html(self, *args, **kwargs):
return self.string
def get_tags(self):
return []
def get_contents(self):
return []
@classmethod
def _find_and_replace(cls, string, *args, **kwargs):
"See the find_and_replace method"
pat = cls.create_pattern(string)
if not pat:
return [String(string)]
roam_objects = [cls.from_string(s, validate=False, *args, **kwargs) for s in re.findall(pat, string)]
string_split = [String(s) for s in re.split(pat, string)]
# Weave strings and roam objects together
roam_objects = [a for b in zip_longest(string_split, roam_objects) for a in b if a]
roam_objects = [o for o in roam_objects if o.to_string()]
return roam_objects
@classmethod
def find_and_replace(cls, string, *args, **kwargs):
"""Replace all substring representations of this object with this object
Args:
string (str or sequence of BlockContentItem)
Returns:
BlockContent: A sequence of String and this object type.
"""
if type(string)==str:
roam_objects = BlockContent([String(string)])
elif type(string)==BlockContent:
roam_objects = string
else:
raise ValueError(f"'{type(string)}' is an invalid type for `string`")
new_roam_objects = []
for obj in roam_objects:
if type(obj)==String:
new_roam_objects += cls._find_and_replace(obj.to_string(), *args, **kwargs)
else:
new_roam_objects += [obj]
roam_objects = new_roam_objects
return BlockContent(roam_objects)
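    # Illustrative sketch: Image.find_and_replace("see ![](pic.png) ok") weaves
    # strings and objects together, roughly
    # BlockContent([String('see '), <Image>, String(' ok')])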
def __repr__(self):
return "<%s(string='%s')>" % (
self.__class__.__name__, self.to_string())
def __eq__(self, b):
return self.to_string()==b.to_string()
class BlockQuote(BlockContentItem):
def __init__(self, block_content, prefix="> "):
self.block_content = block_content
self.prefix = prefix
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
prefix, quote = re.match("^(>\s?)([\w\W]*)$", string).groups()
block_content = BlockContent.from_string(quote, **kwargs)
return cls(block_content, prefix=prefix)
def to_string(self):
return self.prefix + self.block_content.to_string()
def to_html(self, *args, **kwargs):
return '<blockquote class="rm-bq">' + self.block_content.to_html(*args, **kwargs) + '</blockquote>'
def get_tags(self):
return self.block_content.get_tags()
def get_contents(self):
return self.block_content.get_contents()
@classmethod
def create_pattern(cls, string=None):
return "^>[\w\W]*$"
def __eq__(self, other):
return type(self)==type(other) and self.block_content.to_string()==other.block_content.to_string()
class ClozeLeftBracket(BlockContentItem):
"""
- {
- {1
- {1:
- {c1:
- [[{c1:]]
"""
def __init__(self, id=None, enclosed=False, c=False, sep=""):
self.id = id
self.enclosed = enclosed
self.c = c
self.sep = sep
@classmethod
def _find_and_replace(cls, string):
pats = [
"\[\[{c?\d*[:|]?\]\]", # [[{]] or [[{c1:}]]
"(?<!{){c?\d+[:|]", # {1 or {c1:
"(?<!{){(?!{)" # {
]
matches = list(re.finditer("|".join(pats), string))
if not matches:
return [String(string)]
objs = []
last_cloze_end = 0
for match in matches:
# Create cloze
text = match.group(0)
c = "c" in text
enclosed = text.startswith("[[")
m = re.search("\d+", text)
id = int(m.group(0)) if m else None
if ":" in text:
sep = ":"
elif "|" in text:
sep = "|"
else:
sep = ""
# Split string and replace with objects
objs.append(String(string[last_cloze_end:match.start()]))
objs.append(cls(id, enclosed, c, sep))
last_cloze_end = match.end()
if last_cloze_end != len(string):
objs.append(String(string[last_cloze_end:]))
return BlockContent(objs)
def to_string(self):
res = "{"
if self.c:
res += "c"
if self.id:
res += str(self.id)
if self.sep:
res += self.sep
if self.enclosed:
res = "[[" + res + "]]"
return res
def to_html(self):
return "{{c" + str(self.id) + "::"
def __repr__(self):
return "<%s(string='%s')>" % (
self.__class__.__name__, self.to_string())
class ClozeRightBracket(BlockContentItem):
"""
- [[::hint}]]
- [[}]]
- [[::hint]]}
- }
- ::hint}
"""
def __init__(self, enclosed=False, hint=None, string=None):
self.enclosed = enclosed
self.hint = hint
self.string = string
@classmethod
def _find_and_replace(cls, string):
pats = [
"\[\[(?:::[^}\[]*)?}\]\]", # [[}]] or [[::hint}]]
"\[\[(?:::[^}\[]*)\]\]}", # [[::hint]]}
"(?:::[^}\[]*)}(?!})", # ::hint}
"(?<!})}(?!})", # }
]
        matches = list(re.finditer("|".join(pats), string))
if not matches:
return [String(string)]
objs = []
last_cloze_end = 0
for match in matches:
text = match.group(0)
# [[}]] or [[::hint}]]
if text.startswith("[[") and text.endswith("]]"):
hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:]) if "::" in text else None
enclosed = True
# [[::hint]]}
elif text.startswith("[[") and text.endswith("}"):
hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:], enclosed=True)
enclosed = False
# } or ::hint}
else:
hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:]) if "::" in text else None
enclosed = False
# Split string and replace with objects
objs.append(String(string[last_cloze_end:match.start()]))
objs.append(cls(enclosed, hint=hint))
last_cloze_end = match.end()
if last_cloze_end != len(string):
objs.append(String(string[last_cloze_end:]))
return BlockContent(objs)
def to_string(self):
res = "}"
if self.hint:
res = self.hint.to_string() + res
if self.enclosed:
res = "[[" + res + "]]"
return res
def to_html(self):
if self.hint:
return self.hint.to_html() + "}}"
return "}}"
def __repr__(self):
return "<%s(string='%s')>" % (
self.__class__.__name__, self.to_string())
class ClozeHint(BlockContentItem):
"""
- {something::hint}
- {something[[::hint]]}
- [[{]]something::hint[[}]]
- [[{]]something[[::hint}]]
"""
def __init__(self, text, enclosed=False):
self.text = text
self.enclosed = enclosed
@classmethod
def from_string(cls, hint):
return cls(hint[2:])
@classmethod
def _find_and_replace(cls, string):
pats = [
"\[\[::[^\]]*\]\]",
"::[^}\[]*"
]
        matches = list(re.finditer("|".join(pats), string))
if not matches:
return BlockContent(string)
objs = []
last_cloze_end = 0
for match in matches:
text = match.group(0)
if text.startswith("[["):
enclosed = True
text = text[2:-2] # remove surround brackets
else:
enclosed = False
text = text[2:] # remove '::' prefix
objs.append(String(string[last_cloze_end:match.start()]))
objs.append(cls(text, enclosed))
last_cloze_end = match.end()
if last_cloze_end != len(string):
objs.append(String(string[last_cloze_end:]))
return BlockContent(objs)
def to_string(self):
res = "::" + str(self.text)
if self.enclosed:
res = "[[" + res + "]]"
return res
def to_html(self):
return "::" + str(self.text)
class Cloze(BlockContentItem):
def __init__(self, inner:BlockContent="", left_bracket:ClozeLeftBracket=None, right_bracket:ClozeRightBracket=None,
hint:ClozeHint=None, id=1, c=True, sep=":", enclosed=False, string=None, roam_db=None):
self.inner = BlockContent(inner)
self.left_bracket = left_bracket or ClozeLeftBracket(id=id, c=c, enclosed=enclosed, sep=sep)
self.right_bracket = right_bracket or ClozeRightBracket(enclosed=enclosed)
if self.right_bracket.hint and hint:
            raise ValueError("Only one hint is allowed")
if type(hint) == str:
hint = ClozeHint(hint)
self._hint = hint
self.string = string
self.roam_db = roam_db
@property
def hint(self):
return self._hint or self.right_bracket.hint
@property
def id(self):
return self.left_bracket.id if self.left_bracket else None
@id.setter
def id(self, id):
self.left_bracket.id = id
@classmethod
def from_string(cls, string, validate=True, **kwargs):
objs = cls.find_and_replace(string)
if len(objs) != 1 or type(objs[0]) != cls:
raise ValueError(f"Invalid string '{string}' for {cls.__name__}")
return objs[0]
@classmethod
def find_and_replace(cls, string, *args, **kwargs):
objs = BlockContent(string)
objs = ClozeLeftBracket.find_and_replace(objs)
objs = ClozeRightBracket.find_and_replace(objs)
objs = ClozeHint.find_and_replace(objs)
res = []
next_idx = 0
left_idx = right_idx = None
for i, obj in enumerate(objs):
# Left cloze bracket
if right_idx is None and type(obj) == ClozeLeftBracket:
res += objs[next_idx:i]
next_idx = left_idx = i
# Right cloze bracket matched to previous left bracket
elif left_idx is not None and type(obj) == ClozeRightBracket:
inner = objs[left_idx+1:i]
hint = None
if type(inner[-1]) == ClozeHint:
inner, hint = inner[:-1], inner[-1]
inner = BlockContent.find_and_replace(inner)
cloze = cls(inner=inner, left_bracket=objs[left_idx], right_bracket=obj, hint=hint)
res.append(cloze)
left_idx = right_idx = None
next_idx = i+1
# Left bracket after an unmatched left bracket
elif left_idx is not None and type(obj) == ClozeLeftBracket:
res += objs[left_idx:i]
next_idx = left_idx = i
# Right bracket after an unmatched right bracket
elif right_idx is not None and type(obj) == ClozeRightBracket:
res += objs[right_idx:i]
next_idx = right_idx = i
res += objs[next_idx:]
# Remove any cloze brackets or hints which weren't matched up
for i, obj in enumerate(res):
if type(obj) in [ClozeLeftBracket, ClozeRightBracket, ClozeHint]:
res[i] = String(obj.to_string())
cls._assign_cloze_ids([o for o in res if type(o)==Cloze])
bc = BlockContent(res)
bc.merge_adjacent_strings()
return bc
def get_tags(self):
return self.inner.get_tags()
def to_string(self, style="anki"):
"""
Args:
style (string): {'anki','roam'}
"""
if style=="anki":
return "{{c%s::%s%s}}" % (self.id, self.inner.to_string(), self.hint.to_string() if self.hint else "")
elif style=="roam":
res = ""
for o in [self.left_bracket, self.inner, self._hint, self.right_bracket]:
res += o.to_string() if o else ""
return res
else:
raise ValueError(f"style='{style}' is an invalid. "\
"Must be 'anki' or 'roam'")
def to_html(self, *args, **kwargs):
"""
Args:
pageref_cloze (str): {'outside', 'inside', 'base_only'}
"""
kwargs['roam_db'] = self.roam_db
proc_cloze = kwargs.get("proc_cloze", True)
pageref_cloze = kwargs.get("pageref_cloze", "outside")
if not proc_cloze:
bc = BlockContent.find_and_replace(self.to_string("roam"), skip=[Cloze])
return bc.to_html(*args, **kwargs)
# Fancy options to move around the cloze when it's only around a PageRef
if self.inner.is_single_pageref() and self.hint is None:
pageref = self.inner[0]
if pageref_cloze=="outside":
content = pageref.to_html()
return Cloze(id=self.id, inner=content, hint=self.hint).to_string()
elif pageref_cloze=="inside":
clozed_title = Cloze(id=self.id, inner=pageref.title, hint=self.hint).to_string()
return pageref.to_html(title=clozed_title)
elif pageref_cloze=="base_only":
clozed_base = Cloze(id=self.id, inner=pageref.get_basename(), hint=self.hint).to_string()
namespace = pageref.get_namespace()
if namespace:
clozed_base = namespace + "/" + clozed_base
return pageref.to_html(title=clozed_base)
else:
raise ValueError(f"{pageref_cloze} is an invalid option for `pageref_cloze`")
res = ""
for o in [self.left_bracket, self.inner, self._hint, self.right_bracket]:
res += o.to_html() if o else ""
return res
@staticmethod
def _assign_cloze_ids(clozes):
assigned_ids = [c.id for c in clozes if c.id]
next_id = 1
for cloze in clozes:
if cloze.id: continue
while next_id in assigned_ids:
next_id += 1
assigned_ids += [next_id]
cloze.id = next_id
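# Illustrative sketch (added): for clozes with explicit ids [2, None, None],
# _assign_cloze_ids keeps the 2 and gives the rest the smallest unused ids,
# in order of appearance, yielding [2, 1, 3].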
def __repr__(self):
string = self.string or self.to_string(style="roam")
return "<%s(id=%s, string='%s')>" % (
self.__class__.__name__, self.id, string)
def __eq__(self, other):
return type(self)==type(other) and self.inner == other.inner
class Image(BlockContentItem):
def __init__(self, src, alt="", string=None):
self.src = src
self.alt = alt
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
alt, src = re.search("!\[([^\[\]]*)\]\(([^\)\n]+)\)", string).groups()
return cls(src, alt)
@classmethod
def create_pattern(cls, string=None):
return r"!\[[^\[\]]*\]\([^\)\n]+\)"
def to_string(self):
if self.string:
return self.string
return f""
def to_html(self, *arg, **kwargs):
return f'<img src="{html.escape(self.src)}" alt="{html.escape(self.alt)}" draggable="false" class="rm-inline-img">'
def __eq__(self, other):
return type(self)==type(other) and self.src==other.src and self.alt==other.alt
class Alias(BlockContentItem):
def __init__(self, alias, destination, string=None):
self.alias = alias
self.destination = destination
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
alias, destination = re.search(r"^\[([^\[\]]+)\]\(([\W\w]+)\)$", string).groups()
if re.match("^\[\[.*\]\]$", destination):
destination = PageRef.from_string(destination)
elif re.match("^\(\(.*\)\)$", destination):
roam_db = kwargs.get("roam_db", None)
destination = BlockRef.from_string(destination, roam_db=roam_db)
else:
# TODO: should this be a Url object?
destination = String(destination)
return cls(alias, destination, string)
def to_string(self):
if self.string:
return self.string
return f"[{self.alias}]({self.destination.to_string()})"
def to_html(self, *arg, **kwargs):
if type(self.destination)==PageRef:
return '<a title="page: %s" class="rm-alias rm-alias-page">%s</a>' % (
html.escape(self.destination.title), html.escape(self.alias))
elif type(self.destination)==BlockRef:
return '<a title="block: %s" class="rm-alias rm-alias-block">%s</a>' % (
html.escape(self.destination.to_string(expand=True)), html.escape(self.alias))
else:
return '<a title="url: {0}" class="rm-alias rm-alias-external" href="{0}">{1}</a>'.format(
html.escape(self.destination.to_string()), html.escape(self.alias))
def get_tags(self):
return self.destination.get_tags()
def get_contents(self):
return self.destination.get_contents()
@classmethod
def create_pattern(cls, string=None):
re_template = r"\[[^\[\]]+\]\(%s\)"
destination_pats = []
for o in [PageRef, BlockRef]:
dest_pat = o.create_pattern(string)
destination_pats += re.split(RE_SPLIT_OR, dest_pat) if dest_pat else []
destination_pats.append("[^\(\)\[\]]+") # TODO: replace this with a real url regex
return "|".join([re_template % pat for pat in destination_pats])
def __eq__(self, other):
return type(self)==type(other) and self.alias==other.alias and self.destination==other.destination
class CodeBlock(BlockContentItem):
def __init__(self, code, language=None, string=None):
self.code = code
self.language = language
self.string = string
@classmethod
def from_string(cls, string, **kwargs):
super().from_string(string)
supported_languages = [
"clojure", "css", "elixir", "html", "plain text", "python", "ruby",
"swift", "typescript", "isx", "yaml", "rust", "shell", "php", "java",
"c#", "c++", "objective-c", "kotlin", "sql", "haskell", "scala",
"common lisp", "julia", "sparql", "turtle", "javascript"]
pat_lang = "^```(%s)\n" % "|".join([re.escape(l) for l in supported_languages])
match_lang = re.search(pat_lang, string)
if match_lang:
language = match_lang.group(1)
pat = re.compile(f"```{language}\n([^`]*)```")
else:
language = None
pat = re.compile("```([^`]*)```")
code = re.search(pat, string).group(1)
return cls(code, language, string)
@classmethod
def create_pattern(cls, string=None):
return f"```[^`]*```"
def to_string(self):
if self.string: return self.string
if self.language:
return f'```{self.language}\n{self.code}```'
else:
return f'```{self.code}```'
def to_html(self, *args, **kwargs):
code = html.escape(self.code)
return f'<pre><code>{code}</code></pre>'
def __eq__(self, other):
return type(self)==type(other) and self.language==other.language and self.code==other.code
class CodeInline(BlockContentItem):
def __init__(self, code, string=None):
self.code = code
self.string = string
@classmethod
def from_string(cls, string, **kwargs):
super().from_string(string)
pat = re.compile("`([^`]*)`")
code = re.search(pat, string).group(1)
return cls(code, string)
@classmethod
def create_pattern(cls, string=None):
return "`[^`]*`"
def to_string(self):
if self.string: return self.string
return f'`{self.code}`'
def to_html(self, *args, **kwargs):
code = html.escape(self.code)
return f'<code>{code}</code>'
def __eq__(self, other):
return type(self)==type(other) and self.code==other.code
class Checkbox(BlockContentItem):
def __init__(self, checked=False):
self.checked = checked
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
return cls(checked="DONE" in string)
@classmethod
def create_pattern(cls, string=None):
return re.escape("{{[[TODO]]}}")+"|"+re.escape("{{[[DONE]]}}")
def get_tags(self):
return ["DONE"] if self.checked else ["TODO"]
def to_string(self):
return "{{[[DONE]]}}" if self.checked else "{{[[TODO]]}}"
def to_html(self, *arg, **kwargs):
if self.checked:
return '<span><label class="check-container"><input type="checkbox" checked=""><span class="checkmark"></span></label></span>'
else:
return '<span><label class="check-container"><input type="checkbox"><span class="checkmark"></span></label></span>'
def __eq__(self, other):
return type(self)==type(other) and self.checked==other.checked
class View(BlockContentItem):
def __init__(self, name: BlockContentItem, text, string=None):
if type(name)==str:
name = String(name)
self.name = name
self.text = text
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
name, text = re.search("{{([^:]*):(.*)}}", string).groups()
if re.match("^\[\[.*\]\]$", name):
name = PageRef.from_string(name)
else:
name = String(name)
return cls(name, text, string)
def to_html(self, *arg, **kwargs):
return html.escape(self.text)
def get_tags(self):
return self.name.get_tags()
def get_contents(self):
return self.name.get_contents()
@classmethod
def create_pattern(cls, strings=None):
re_template = "{{%s:.*}}"
pats = []
for view in ["youtube", "query", "mentions"]:
pats.append(re_template % view)
pats.append(re_template % re.escape(f"[[{view}]]"))
return "|".join(pats)
def to_string(self):
if self.string:
return self.string
return "{{%s:%s}}" % (self.name.to_string(), self.text)
def __eq__(self, other):
return type(self)==type(other) and self.name==other.name and self.text==other.text
class Embed(BlockContentItem):
def __init__(self, name: BlockContentItem, blockref, string=None):
if type(name)==str:
name = String(name)
self.name = name
self.blockref = blockref
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
name, blockref = re.search("{{([^:]*):\s*([^\s]*)\s*}}", string).groups()
if re.match("^\[\[.*\]\]$", name):
name = PageRef.from_string(name)
else:
name = String(name)
blockref = BlockRef.from_string(blockref, **kwargs)
return cls(name, blockref, string)
def to_html(self, *arg, **kwargs):
block = self.blockref.get_referenced_block()
if block:
inner_html = block.to_html(children=True, *arg, **kwargs)
else:
inner_html = self.blockref.to_html(*arg, **kwargs)
return '<div class="rm-embed-container">' + \
inner_html + \
'</div>'
def get_tags(self):
return self.name.get_tags()
def get_contents(self):
return self.name.get_contents()
@classmethod
def create_pattern(cls, strings=None):
pats = []
pats.append("{{embed:\s*%s\s*}}" % BlockRef.create_pattern())
pats.append("{{\[\[embed\]\]:\s*%s\s*}}" % BlockRef.create_pattern())
return "|".join(pats)
def to_string(self):
if self.string:
return self.string
return "{{%s:%s}}" % (self.name.to_string(), self.blockref.to_string())
def __eq__(self, other):
return type(self)==type(other) and self.name==other.name and self.blockref==other.blockref
class Button(BlockContentItem):
def __init__(self, name, text="", string=None):
self.name = name
self.text = text
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
contents = string[2:-2]
if ":" in contents:
m = re.search(r"([^:]*):(.*)", contents)
name, text = m.groups()
else:
name, text = contents, ""
return cls(name, text, string)
def get_tags(self):
return BlockContent.from_string(self.text).get_tags()
def get_contents(self):
return BlockContent.from_string(self.text).get_contents()
def to_string(self):
if self.string: return self.string
if self.text:
return "{{%s:%s}}" % (self.name, self.text)
else:
return "{{%s}}" % self.name
def to_html(self, *arg, **kwargs):
return '<button class="bp3-button bp3-small dont-focus-block">%s</button>' % html.escape(self.name)
@classmethod
def create_pattern(cls, string=None):
return "{{.(?:(?<!{{).)*}}"
def __eq__(self, other):
return type(self)==type(other) and self.name==other.name and self.text==other.text
class PageRef(BlockContentItem):
def __init__(self, title, uid="", string=None):
"""
Args:
title (str or BlockContent)
"""
if type(title)==str: title = PageRef.find_and_replace(title)
self._title = title
self.uid = uid
self.string = string
@property
def title(self):
return self._title.to_string()
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
roam_objects = PageRef.find_and_replace(string[2:-2])
return cls(roam_objects, string=string)
@classmethod
def create_pattern(cls, string, groups=False):
page_refs = PageRef.extract_page_ref_strings(string)
if not page_refs:
return None
if groups:
titles = [re.escape(p[2:-2]) for p in page_refs]
return "|".join([f"(\[\[)({t})(\]\])" for t in titles])
else:
return "|".join([re.escape(p) for p in page_refs])
def get_tags(self):
tags_in_title = [o.get_tags() for o in self._title]
tags_in_title = list(set(reduce(lambda x,y: x+y, tags_in_title, [])))
return [self.title] + tags_in_title
def get_contents(self):
items = []
for item in self._title:
items += item.get_contents()
return items
def get_namespace(self):
return os.path.split(self.title)[0]
def get_basename(self):
return os.path.split(self.title)[1]
def to_string(self):
if self.string: return self.string
return f"[[{self.title}]]"
def to_html(self, title=None, *args, **kwargs):
#if not title: title=self.title
# Page ref is just a string
if title:
title_html = title
elif set([type(o) for o in self._title]) == set([String]):
title = html.escape(self._title.to_string())
title_split = title.split("/")
if len(title_split) == 1:
title_html = title
else:
namespace, name = "/".join(title_split[:-1]) + "/", title_split[-1]
title_html = \
f'<span class="rm-page-ref-namespace">{namespace}</span>'\
f'<span class="rm-page-ref-name">{name}</span>'
else:
title_html = "".join([o.to_html() for o in self._title])
uid_attr = f' data-link-uid="{self.uid}"' if self.uid else ''
return \
f'<span data-link-title="{html.escape(self.title)}"{uid_attr}>'\
f'<span class="rm-page-ref-brackets">[[</span>'\
f'<span class="rm-page-ref rm-page-ref-link-color">{title_html}</span>'\
f'<span class="rm-page-ref-brackets">]]</span>'\
f'</span>'
@staticmethod
def extract_page_ref_strings(string):
# https://stackoverflow.com/questions/524548/regular-expression-to-detect-semi-colon-terminated-c-for-while-loops/524624#524624
bracket_count = 0
pages = []
page = ""
prev_char = ""
for j,c in enumerate(string):
# Track page opening and closing
if prev_char+c == "[[":
if not page:
page = string[j-1]
bracket_count += 1
prev_char = ""
elif prev_char+c == "]]":
bracket_count -= 1
prev_char = ""
else:
prev_char = c
if page:
page += c
# End of page
if bracket_count == 0 and page:
pages.append(page)
page = ""
return pages
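# Illustrative sketch (added): the bracket counter keeps nested refs intact.
#
#   PageRef.extract_page_ref_strings("see [[a [[b]] c]] and [[d]]")
#   # -> ['[[a [[b]] c]]', '[[d]]']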
def __eq__(self, other):
return type(self)==type(other) and self.title==other.title
class PageTag(BlockContentItem):
def __init__(self, title, string=None):
"""
Args:
title (str or BlockContent)
"""
if type(title)==str: title = PageRef.find_and_replace(title)
self._title = title
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
title = re.sub("\[\[([\W\w]*)\]\]", "\g<1>", string[1:])
roam_objects = PageRef.find_and_replace(title)
return cls(roam_objects, string)
@property
def title(self):
return self._title.to_string()
def get_tags(self):
tags_in_title = [o.get_tags() for o in self._title]
tags_in_title = list(set(reduce(lambda x,y: x+y, tags_in_title, [])))
return [self.title] + tags_in_title
def get_contents(self):
items = []
for item in self._title:
items += item.get_contents()
return items
def to_string(self):
if self.string:
return self.string
return "#"+self.title
def to_html(self, *arg, **kwargs):
return \
f'<span data-tag="{html.escape(self.title)}" '\
f'class="rm-page-ref rm-page-ref-tag">#{html.escape(self.title)}</span>'
@classmethod
def create_pattern(cls, string):
pats = ["#[\w\-_@\.]+"]
# Create pattern for page refs which look like tags
page_ref_pat = PageRef.create_pattern(string)
if page_ref_pat:
pats += ["#"+pat for pat in re.split(RE_SPLIT_OR, page_ref_pat)]
return "|".join(pats)
def __eq__(self, other):
return type(self)==type(other) and self.title == other.title
class BlockRef(BlockContentItem):
def __init__(self, uid, roam_db=None, string=None):
self.uid = uid
self.roam_db = roam_db
self.string = string
@classmethod
def from_string(cls, string, *args, **kwargs):
super().from_string(string)
roam_db = kwargs.get("roam_db", None)
return cls(string[2:-2], roam_db=roam_db, string=string)
def to_string(self, expand=False):
if expand:
block = self.get_referenced_block()
if block:
return block.to_string()
if self.string:
return self.string
else:
return f"(({self.uid}))"
def to_html(self, *arg, **kwargs):
block = self.get_referenced_block()
text = block.to_html() if block else html.escape(self.to_string())
return '<div class="rm-block-ref"><span>%s</span></div>' % text
def get_tags(self):
return []
@classmethod
def create_pattern(cls, string=None):
return "\(\([\w\d\-_]{9}\)\)"
def get_referenced_block(self):
if self.roam_db:
return self.roam_db.query_by_uid(self.uid)
def __eq__(self, other):
return type(self)==type(other) and self.uid==other.uid
class Url(BlockContentItem):
def __init__(self, text):
self.text = text
@classmethod
def from_string(cls, string, **kwargs):
super().from_string(string)
return cls(string)
def to_string(self):
return self.text
def to_html(self, *arg, **kwargs):
return f'<span><a href="{html.escape(self.text)}">{html.escape(self.text)}</a></span>'
def __eq__(self, other):
return type(self)==type(other) and self.text==other.text
class String(BlockContentItem):
def __init__(self, string):
if type(string) == String:
string = string.to_string()
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
return cls(string)
@classmethod
def validate_string(cls, string):
return True
def to_html(self, *arg, **kwargs):
return html.escape(self.to_string()).replace("\n", "<br>")
def get_tags(self):
return []
def to_string(self):
return self.string
def __eq__(self, other):
return type(self)==type(other) and self.string==other.string
class Attribute(BlockContentItem):
def __init__(self, title, string=None):
self.title = title
self.string = string
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
return cls(string[:-2], string)
@classmethod
def validate_string(cls, string):
pat = re.compile(cls.create_pattern(string)+"$")
if re.match(pat, string):
return True
return False
@classmethod
def create_pattern(cls, string=None):
return "^(?:(?<!:)[^:])+::"
def to_html(self, *arg, **kwargs):
return '<span><strong>%s:</strong></span>' % html.escape(self.title)
def get_tags(self):
return [self.title]
def to_string(self):
if self.string:
return self.string
return self.title+"::"
def __eq__(self, other):
return type(self)==type(other) and self.title==other.title
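# Illustrative sketch (added): Attribute captures the leading "key::" prefix.
#
#   Attribute.from_string("status::").title       # -> 'status'
#   Attribute.from_string("status::").get_tags()  # -> ['status']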
filter_wrap.rs | use lossyq::spsc::{Sender};
use super::super::super::{Task, Message, ChannelWrapper, ChannelId,
SenderChannelId, ReceiverChannelId, SenderName, ChannelPosition
};
use super::super::connectable::{Connectable};
use super::super::identified_input::{IdentifiedInput};
use super::super::counter::{OutputCounter, InputCounter};
use super::super::filter::{Filter};
pub struct FilterWrap<InputValue: Send, InputError: Send,
OutputValue: Send, OutputError: Send> {
name : String,
state : Box<Filter<InputValue=InputValue, InputError=InputError,
OutputValue=OutputValue, OutputError=OutputError>+Send>,
input_rx : ChannelWrapper<InputValue, InputError>,
output_tx : Sender<Message<OutputValue, OutputError>>,
}
pub fn new<InputValue: Send, InputError: Send, OutputValue: Send, OutputError: Send>(
name : String,
state : Box<Filter<InputValue=InputValue, InputError=InputError,
OutputValue=OutputValue, OutputError=OutputError>+Send>,
input_rx : ChannelWrapper<InputValue, InputError>,
output_tx : Sender<Message<OutputValue, OutputError>>)
-> FilterWrap<InputValue, InputError, OutputValue, OutputError>
{
FilterWrap{ name: name, state: state, input_rx: input_rx, output_tx: output_tx }
}
impl<InputValue: Send, InputError: Send, OutputValue: Send, OutputError: Send> IdentifiedInput
for FilterWrap<InputValue, InputError, OutputValue, OutputError>
{
fn get_input_id(&self, ch_id: ReceiverChannelId) -> Option<(ChannelId, SenderName)> {
if ch_id.0 == 0 {
match &self.input_rx {
&ChannelWrapper::ConnectedReceiver(ref channel_id, ref _receiver, ref sender_name) => {
Some((*channel_id, sender_name.clone()))
},
_ => None,
}
} else {
None
}
}
}
impl<InputValue: Send, InputError: Send, OutputValue: Send, OutputError: Send> InputCounter
for FilterWrap<InputValue, InputError, OutputValue, OutputError>
{
fn get_rx_count(&self, ch_id: ReceiverChannelId) -> usize {
if ch_id.0 == 0 {
if let &ChannelWrapper::ConnectedReceiver(ref _channel_id, ref receiver, ref _sender_name) = &self.input_rx {
receiver.seqno()
} else {
0
}
} else {
0
}
}
}
impl<InputValue: Send, InputError: Send, OutputValue: Send, OutputError: Send> OutputCounter
for FilterWrap<InputValue, InputError, OutputValue, OutputError>
{
fn get_tx_count(&self, ch_id: SenderChannelId) -> usize {
if ch_id.0 == 0 {
self.output_tx.seqno()
} else {
0
}
}
}
impl<InputValue: Send, InputError: Send, OutputValue: Send, OutputError: Send> Connectable
for FilterWrap<InputValue, InputError, OutputValue, OutputError>
{
type InputValue = InputValue;
type InputError = InputError;
fn input(&mut self) -> &mut ChannelWrapper<InputValue, InputError> {
&mut self.input_rx
}
}
impl<InputValue: Send, InputError: Send, OutputValue: Send, OutputError: Send> Task
for FilterWrap<InputValue, InputError, OutputValue, OutputError>
{
fn execute(&mut self, stop: &mut bool) {
self.state.process(&mut self.input_rx, &mut self.output_tx, stop);
}
fn name(&self) -> &String { &self.name }
fn input_count(&self) -> usize { 1 }
fn output_count(&self) -> usize { 1 }
fn input_id(&self, ch_id: ReceiverChannelId) -> Option<(ChannelId, SenderName)> {
self.get_input_id(ch_id)
}
fn input_channel_pos(&self, ch_id: ReceiverChannelId) -> ChannelPosition {
ChannelPosition( self.get_rx_count(ch_id) )
}
fn output_channel_pos(&self, ch_id: SenderChannelId) -> ChannelPosition {
ChannelPosition( self.get_tx_count(ch_id) )
}
}
modal.ts | import { WinObject } from "components/window/src/window";
abstract class Modal {
protected _target: HTMLElement | null;
protected _background: HTMLElement | null;
constructor(target: HTMLElement, Action: (e: Event) => void = (e: Event) => e.stopPropagation()) {
this._target = target;
this._background = document.createElement("div");
this.background!.classList.add("modal");
this.background!.addEventListener("click", Action, false);
this.target!.parentNode!.insertBefore(this.background!, this.target);
this.background!.appendChild(this.target!);
}
public get target() { return this._target; }
public get background() { return this._background; }
}
export class WindowModal extends Modal {
constructor(window: WinObject) {
super(window.target);
this.background!.classList.add("modal-window");
}
public RemoveModal = () => {
if (!this.background) return;
this.background.remove();
this._background = null;
}
}
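// Illustrative usage sketch (added; assumes an existing WinObject `win`):
//
//   const modal = new WindowModal(win); // wraps win.target in a backdrop div
//   modal.RemoveModal();                // removes the backdrop and its contents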
source_map.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The SourceMap tracks all the source code used within a single crate, mapping
//! from integer byte positions to the original source code location. Each bit
//! of source parsed during crate parsing (typically files, in-memory strings,
//! or various bits of macro expansion) covers a contiguous range of bytes in the
//! SourceMap and is represented by a SourceFile. Byte positions are stored in
//! `spans` and used pervasively in the compiler. They are absolute positions
//! within the SourceMap, which upon request can be converted to line and column
//! information, source code snippets, etc.
pub use crate::syntax_pos::*;
use crate::{
collections::AHashMap,
errors::SourceMapper,
rustc_data_structures::stable_hasher::StableHasher,
sync::{Lock, LockGuard, Lrc, MappedLockGuard},
};
use log::debug;
#[cfg(feature = "sourcemap")]
use sourcemap::SourceMapBuilder;
use std::{
cmp,
cmp::{max, min},
env, fs,
hash::Hash,
io::{self, Read},
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering::SeqCst},
};
// _____________________________________________________________________________
// SourceFile, MultiByteChar, FileName, FileLines
//
/// An abstraction over the fs operations used by the Parser.
pub trait FileLoader {
/// Query the existence of a file.
fn file_exists(&self, path: &Path) -> bool;
/// Return an absolute path to a file, if possible.
fn abs_path(&self, path: &Path) -> Option<PathBuf>;
/// Read the contents of a UTF-8 file into memory.
fn read_file(&self, path: &Path) -> io::Result<String>;
}
/// A FileLoader that uses std::fs to load real files.
pub struct RealFileLoader;
impl FileLoader for RealFileLoader {
fn file_exists(&self, path: &Path) -> bool {
fs::metadata(path).is_ok()
}
fn abs_path(&self, path: &Path) -> Option<PathBuf> {
if path.is_absolute() {
Some(path.to_path_buf())
} else {
env::current_dir().ok().map(|cwd| cwd.join(path))
}
}
fn read_file(&self, path: &Path) -> io::Result<String> {
let mut src = String::new();
fs::File::open(path)?.read_to_string(&mut src)?;
Ok(src)
}
}
// This is a SourceFile identifier that is used to correlate SourceFiles between
// subsequent compilation sessions (which is something we need to do during
// incremental compilation).
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct StableSourceFileId(u128);
impl StableSourceFileId {
pub fn new(source_file: &SourceFile) -> StableSourceFileId {
let mut hasher = StableHasher::new();
source_file.name.hash(&mut hasher);
source_file.name_was_remapped.hash(&mut hasher);
source_file.unmapped_path.hash(&mut hasher);
StableSourceFileId(hasher.finish())
}
}
// _____________________________________________________________________________
// SourceMap
//
#[derive(Default)]
pub(super) struct SourceMapFiles {
pub(super) source_files: Vec<Lrc<SourceFile>>,
stable_id_to_source_file: AHashMap<StableSourceFileId, Lrc<SourceFile>>,
}
/// The interner for spans.
///
/// As most spans are simply stored, we store them in interned form.
///
/// - Each AST node only stores a pointer to the actual data ([BytePos]).
/// - The pointers ([BytePos]) can be converted to file name, line and column
/// using this struct.
pub struct SourceMap {
pub(super) files: Lock<SourceMapFiles>,
start_pos: AtomicUsize,
file_loader: Box<dyn FileLoader + Sync + Send>,
// This is used to apply the file path remapping as specified via
// --remap-path-prefix to all SourceFiles allocated within this SourceMap.
path_mapping: FilePathMapping,
/// In case we are in a doctest, replace all file names with the PathBuf,
/// and add the given offsets to the line info
doctest_offset: Option<(FileName, isize)>,
}
impl Default for SourceMap {
fn default() -> Self {
Self::new(FilePathMapping::empty())
}
}
impl SourceMap {
pub fn new(path_mapping: FilePathMapping) -> SourceMap {
SourceMap {
files: Default::default(),
start_pos: Default::default(),
file_loader: Box::new(RealFileLoader),
path_mapping,
doctest_offset: None,
}
}
pub fn with_file_loader(
file_loader: Box<dyn FileLoader + Sync + Send>,
path_mapping: FilePathMapping,
) -> SourceMap {
SourceMap {
files: Default::default(),
start_pos: Default::default(),
file_loader,
path_mapping,
doctest_offset: None,
}
}
pub fn path_mapping(&self) -> &FilePathMapping {
&self.path_mapping
}
pub fn file_exists(&self, path: &Path) -> bool {
self.file_loader.file_exists(path)
}
pub fn load_file(&self, path: &Path) -> io::Result<Lrc<SourceFile>> {
let src = self.file_loader.read_file(path)?;
let filename = path.to_owned().into();
Ok(self.new_source_file(filename, src))
}
pub fn files(&self) -> MappedLockGuard<'_, Vec<Lrc<SourceFile>>> {
LockGuard::map(self.files.borrow(), |files| &mut files.source_files)
}
pub fn source_file_by_stable_id(
&self,
stable_id: StableSourceFileId,
) -> Option<Lrc<SourceFile>> {
self.files
.borrow()
.stable_id_to_source_file
.get(&stable_id)
.cloned()
}
fn next_start_pos(&self, len: usize) -> usize {
// Add one so there is some space between files. This lets us distinguish
// positions in the source_map, even in the presence of zero-length files.
self.start_pos.fetch_add(len + 1, SeqCst)
}
/// Creates a new source_file.
/// This does not ensure that only one SourceFile exists per file name.
pub fn new_source_file(&self, filename: FileName, src: String) -> Lrc<SourceFile> {
// The path is used to determine the directory for loading submodules and
// include files, so it must be before remapping.
// Note that filename may not be a valid path, e.g. it may be `<anon>` etc.,
// but this is okay because the directory determined by `path.pop()` will
// be empty, so the working directory will be used.
let unmapped_path = filename.clone();
let (filename, was_remapped) = match filename {
FileName::Real(filename) => {
let (filename, was_remapped) = self.path_mapping.map_prefix(filename);
(FileName::Real(filename), was_remapped)
}
other => (other, false),
};
// We hold the lock here to prevent a panic.
// If we don't do this, lookup_char_pos and its family **may** panic.
let mut files = self.files.borrow_mut();
let start_pos = self.next_start_pos(src.len());
let source_file = Lrc::new(SourceFile::new(
filename,
was_remapped,
unmapped_path,
src,
Pos::from_usize(start_pos),
));
{
files.source_files.push(source_file.clone());
files
.stable_id_to_source_file
.insert(StableSourceFileId::new(&source_file), source_file.clone());
}
source_file
}
pub fn mk_substr_filename(&self, sp: Span) -> String {
let pos = self.lookup_char_pos(sp.lo());
format!(
"<{}:{}:{}>",
pos.file.name,
pos.line,
pos.col.to_usize() + 1
)
}
// If there is a doctest_offset, apply it to the line
pub fn doctest_offset_line(&self, mut orig: usize) -> usize {
if let Some((_, line)) = self.doctest_offset {
if line >= 0 {
orig += line as usize;
} else {
orig -= (-line) as usize;
}
}
orig
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
let fm = self.lookup_source_file(pos);
self.lookup_char_pos_with(fm, pos)
}
/// Lookup source information about a BytePos
///
/// This method exists only for optimization; it is not part of the
/// public API.
#[doc(hidden)]
pub fn lookup_char_pos_with(&self, fm: Lrc<SourceFile>, pos: BytePos) -> Loc {
let line_info = self.lookup_line_with(fm, pos);
match line_info {
Ok(SourceFileAndLine { sf: f, line: a }) => {
let chpos = self.bytepos_to_file_charpos_with(&f, pos);
let line = a + 1; // Line numbers start at 1
let linebpos = f.lines[a];
assert!(
pos >= linebpos,
"{}: bpos = {:?}; linebpos = {:?};",
f.name,
pos,
linebpos,
);
let linechpos = self.bytepos_to_file_charpos_with(&f, linebpos);
let col = max(chpos, linechpos) - min(chpos, linechpos);
let col_display = {
let start_width_idx = f
.non_narrow_chars
.binary_search_by_key(&linebpos, |x| x.pos())
.unwrap_or_else(|x| x);
let end_width_idx = f
.non_narrow_chars
.binary_search_by_key(&pos, |x| x.pos())
.unwrap_or_else(|x| x);
let special_chars = end_width_idx - start_width_idx;
let non_narrow: usize = f.non_narrow_chars[start_width_idx..end_width_idx]
.iter()
.map(|x| x.width())
.sum();
col.0 - special_chars + non_narrow
};
debug!(
"byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos
);
debug!(
"char pos {:?} is on the line at char pos {:?}",
chpos, linechpos
);
debug!("byte is on line: {}", line);
// assert!(chpos >= linechpos);
Loc {
file: f,
line,
col,
col_display,
}
}
Err(f) => {
let chpos = self.bytepos_to_file_charpos(pos);
let col_display = {
let end_width_idx = f
.non_narrow_chars
.binary_search_by_key(&pos, |x| x.pos())
.unwrap_or_else(|x| x);
let non_narrow: usize = f.non_narrow_chars[0..end_width_idx]
.iter()
.map(|x| x.width())
.sum();
chpos.0 - end_width_idx + non_narrow
};
Loc {
file: f,
line: 0,
col: chpos,
col_display,
}
}
}
}
/// If the relevant source_file is empty, we don't return a line number.
pub fn lookup_line(&self, pos: BytePos) -> Result<SourceFileAndLine, Lrc<SourceFile>> {
let f = self.lookup_source_file(pos);
self.lookup_line_with(f, pos)
}
/// If the relevant source_file is empty, we don't return a line number.
///
/// This method exists only for optimization; it is not part of the
/// public API.
#[doc(hidden)]
pub fn lookup_line_with(
&self,
f: Lrc<SourceFile>,
pos: BytePos,
) -> Result<SourceFileAndLine, Lrc<SourceFile>> {
match f.lookup_line(pos) {
Some(line) => Ok(SourceFileAndLine { sf: f, line }),
None => Err(f),
}
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
let loc = self.lookup_char_pos(pos);
LocWithOpt {
filename: loc.file.name.clone(),
line: loc.line,
col: loc.col,
file: Some(loc.file),
}
}
/// Returns `Some(span)`, a union of the lhs and rhs span. The lhs must
/// precede the rhs. If there are gaps between lhs and rhs, the
/// resulting union will cross these gaps. For this to work, the spans
/// have to satisfy the following:
///
/// * the ctxt of both spans must match
/// * the lhs span needs to end on the same line the rhs span begins
/// * the lhs span must start at or before the rhs span
pub fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
// make sure we're at the same expansion id
if sp_lhs.ctxt() != sp_rhs.ctxt() {
return None;
}
let lhs_end = match self.lookup_line(sp_lhs.hi()) {
Ok(x) => x,
Err(_) => return None,
};
let rhs_begin = match self.lookup_line(sp_rhs.lo()) {
Ok(x) => x,
Err(_) => return None,
};
// if we must cross lines to merge, don't merge
if lhs_end.line != rhs_begin.line {
return None;
}
// ensure these follow the expected order and we don't overlap
if (sp_lhs.lo() <= sp_rhs.lo()) && (sp_lhs.hi() <= sp_rhs.lo()) {
Some(sp_lhs.to(sp_rhs))
} else {
None
}
}
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().source_files.is_empty() && sp.is_dummy() {
return "no-location".to_string();
}
let lo = self.lookup_char_pos_adj(sp.lo());
let hi = self.lookup_char_pos_adj(sp.hi());
format!(
"{}:{}:{}: {}:{}",
lo.filename,
lo.line,
lo.col.to_usize() + 1,
hi.line,
hi.col.to_usize() + 1
)
}
pub fn span_to_filename(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo()).file.name.clone()
}
pub fn span_to_unmapped_path(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo())
.file
.unmapped_path
.clone()
.expect("SourceMap::span_to_unmapped_path called for imported SourceFile?")
}
pub fn is_multiline(&self, sp: Span) -> bool {
let lo = self.lookup_char_pos(sp.lo());
let hi = self.lookup_char_pos(sp.hi());
lo.line != hi.line
}
pub fn span_to_lines(&self, sp: Span) -> FileLinesResult {
debug!("span_to_lines(sp={:?})", sp);
if sp.lo() > sp.hi() {
return Err(SpanLinesError::IllFormedSpan(sp));
}
let lo = self.lookup_char_pos(sp.lo());
debug!("span_to_lines: lo={:?}", lo);
let hi = self.lookup_char_pos(sp.hi());
debug!("span_to_lines: hi={:?}", hi);
if lo.file.start_pos != hi.file.start_pos {
return Err(SpanLinesError::DistinctSources(DistinctSources {
begin: (lo.file.name.clone(), lo.file.start_pos),
end: (hi.file.name.clone(), hi.file.start_pos),
}));
}
assert!(hi.line >= lo.line);
// Empty file contains no lines
if lo.file.src.is_empty() {
return Ok(FileLines {
file: lo.file,
lines: vec![],
});
}
let mut lines = Vec::with_capacity(hi.line - lo.line + 1);
// The span starts partway through the first line,
// but after that it starts from offset 0.
let mut start_col = lo.col;
// For every line but the last, it extends from `start_col`
// and to the end of the line. Be careful because the line
// numbers in Loc are 1-based, so we subtract 1 to get 0-based
// lines.
for line_index in lo.line - 1..hi.line - 1 {
let line_len = lo
.file
.get_line(line_index)
.map(|s| s.chars().count())
.unwrap_or(0);
lines.push(LineInfo {
line_index,
start_col,
end_col: CharPos::from_usize(line_len),
});
start_col = CharPos::from_usize(0);
}
// For the last line, it extends from `start_col` to `hi.col`:
lines.push(LineInfo {
line_index: hi.line - 1,
start_col,
end_col: hi.col,
});
Ok(FileLines {
file: lo.file,
lines,
})
}
/// Extract the source surrounding the given `Span` using the
/// `extract_source` function. The extract function takes three
/// arguments: a string slice containing the source, an index in
/// the slice for the beginning of the span and an index in the slice for
/// the end of the span.
fn span_to_source<F, Ret>(&self, sp: Span, extract_source: F) -> Result<Ret, SpanSnippetError>
where
F: FnOnce(&str, usize, usize) -> Ret,
{
if sp.lo() > sp.hi() {
return Err(SpanSnippetError::IllFormedSpan(sp));
}
let local_begin = self.lookup_byte_offset(sp.lo());
let local_end = self.lookup_byte_offset(sp.hi());
if local_begin.sf.start_pos != local_end.sf.start_pos {
Err(SpanSnippetError::DistinctSources(DistinctSources {
begin: (local_begin.sf.name.clone(), local_begin.sf.start_pos),
end: (local_end.sf.name.clone(), local_end.sf.start_pos),
}))
} else {
let start_index = local_begin.pos.to_usize();
let end_index = local_end.pos.to_usize();
let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
if start_index > end_index || end_index > source_len {
return Err(SpanSnippetError::MalformedForSourcemap(
MalformedSourceMapPositions {
name: local_begin.sf.name.clone(),
source_len,
begin_pos: local_begin.pos,
end_pos: local_end.pos,
},
));
}
let src = &local_begin.sf.src;
Ok(extract_source(src, start_index, end_index))
}
}
/// Return the source snippet as `String` corresponding to the given `Span`
pub fn span_to_snippet(&self, sp: Span) -> Result<String, SpanSnippetError> {
self.span_to_source(sp, |src, start_index, end_index| {
src[start_index..end_index].to_string()
})
}
pub fn span_to_margin(&self, sp: Span) -> Option<usize> {
match self.span_to_prev_source(sp) {
Err(_) => None,
Ok(source) => source
.split('\n')
.last()
.map(|last_line| last_line.len() - last_line.trim_start().len()),
}
}
/// Calls the given closure with the source snippet before the given `Span`
pub fn with_span_to_prev_source<F, Ret>(&self, sp: Span, op: F) -> Result<Ret, SpanSnippetError>
where
F: FnOnce(&str) -> Ret,
{
self.span_to_source(sp, |src, start_index, _| op(&src[..start_index]))
}
/// Return the source snippet as `String` before the given `Span`
pub fn span_to_prev_source(&self, sp: Span) -> Result<String, SpanSnippetError> {
self.with_span_to_prev_source(sp, |s| s.to_string())
}
/// Calls the given closure with the source snippet after the given `Span`
pub fn with_span_to_next_source<F, Ret>(&self, sp: Span, op: F) -> Result<Ret, SpanSnippetError>
where
F: FnOnce(&str) -> Ret,
{
self.span_to_source(sp, |src, _, end_index| op(&src[end_index..]))
}
/// Return the source snippet as `String` after the given `Span`
pub fn span_to_next_source(&self, sp: Span) -> Result<String, SpanSnippetError> {
self.with_span_to_next_source(sp, |s| s.to_string())
}
/// Extend the given `Span` to just after the previous occurrence of `c`.
/// Return the same span if no character could be found or if an error
/// occurred while retrieving the code snippet.
pub fn span_extend_to_prev_char(&self, sp: Span, c: char) -> Span {
if let Ok(prev_source) = self.span_to_prev_source(sp) {
let prev_source = prev_source.rsplit(c).nth(0).unwrap_or("").trim_start();
if !prev_source.is_empty() && !prev_source.contains('\n') {
return sp.with_lo(BytePos(sp.lo().0 - prev_source.len() as u32));
}
}
sp
}
/// Extend the given `Span` to just after the previous occurrence of `pat`
/// when surrounded by whitespace. Return the same span if no character
/// could be found or if an error occurred while retrieving the code
/// snippet.
pub fn span_extend_to_prev_str(&self, sp: Span, pat: &str, accept_newlines: bool) -> Span {
// ensure that the pattern is delimited, to avoid the following
// fn my_fn()
// ^^^^ returned span without the check
// ---------- correct span
for ws in &[" ", "\t", "\n"] {
let pat = pat.to_owned() + ws;
if let Ok(prev_source) = self.span_to_prev_source(sp) {
let prev_source = prev_source.rsplit(&pat).nth(0).unwrap_or("").trim_start();
if !prev_source.is_empty() && (!prev_source.contains('\n') || accept_newlines) {
return sp.with_lo(BytePos(sp.lo().0 - prev_source.len() as u32));
}
}
}
sp
}
/// Given a `Span`, try to get a shorter span ending before the first
/// occurrence of the char `c`.
///
/// # Notes
///
/// This method returns a dummy span for a dummy span.
pub fn span_until_char(&self, sp: Span, c: char) -> Span {
if sp.is_dummy() {
return sp;
}
match self.span_to_snippet(sp) {
Ok(snippet) => {
let snippet = snippet.split(c).nth(0).unwrap_or("").trim_end();
if !snippet.is_empty() && !snippet.contains('\n') {
sp.with_hi(BytePos(sp.lo().0 + snippet.len() as u32))
} else {
sp
}
}
_ => sp,
}
}
/// Given a `Span`, try to get a shorter span ending just after the first
/// occurrence of the char `c`.
///
/// # Notes
///
/// This method returns a dummy span for a dummy span.
pub fn span_through_char(&self, sp: Span, c: char) -> Span {
if sp.is_dummy() {
return sp;
}
if let Ok(snippet) = self.span_to_snippet(sp) {
if let Some(offset) = snippet.find(c) {
return sp.with_hi(BytePos(sp.lo().0 + (offset + c.len_utf8()) as u32));
}
}
sp
}
/// Given a `Span`, get a new `Span` covering the first token and all its
/// trailing whitespace or the original `Span`.
///
/// If `sp` points to `"let mut x"`, then a span pointing at `"let "` will
/// be returned.
pub fn span_until_non_whitespace(&self, sp: Span) -> Span {
let mut whitespace_found = false;
self.span_take_while(sp, |c| {
if !whitespace_found && c.is_whitespace() {
whitespace_found = true;
}
!(whitespace_found && !c.is_whitespace())
})
}
/// Given a `Span`, get a new `Span` covering the first token without its
/// trailing whitespace or the original `Span` in case of error.
///
/// If `sp` points to `"let mut x"`, then a span pointing at `"let"` will be
/// returned.
pub fn span_until_whitespace(&self, sp: Span) -> Span {
self.span_take_while(sp, |c| !c.is_whitespace())
}
/// Given a `Span`, get a shorter one until `predicate` yields false.
pub fn span_take_while<P>(&self, sp: Span, mut predicate: P) -> Span
where
P: for<'r> FnMut(&'r char) -> bool,
{
self.span_to_source(sp, |src, start_index, end_index| {
let snippet = &src[start_index..end_index];
let offset = snippet
.chars()
.take_while(&mut predicate)
.map(|c| c.len_utf8())
.sum::<usize>();
sp.with_hi(BytePos(sp.lo().0 + (offset as u32)))
})
.unwrap_or(sp)
}
pub fn def_span(&self, sp: Span) -> Span {
self.span_until_char(sp, '{')
}
/// Returns a new span representing just the start-point of this span
pub fn start_point(&self, sp: Span) -> Span {
let pos = sp.lo().0;
let width = self.find_width_of_character_at_span(sp, false);
let corrected_start_position = pos.checked_add(width).unwrap_or(pos);
let end_point = BytePos(cmp::max(corrected_start_position, sp.lo().0));
sp.with_hi(end_point)
}
/// Returns a new span representing just the end-point of this span
pub fn end_point(&self, sp: Span) -> Span {
let pos = sp.hi().0;
let width = self.find_width_of_character_at_span(sp, false);
let corrected_end_position = pos.checked_sub(width).unwrap_or(pos);
let end_point = BytePos(cmp::max(corrected_end_position, sp.lo().0));
sp.with_lo(end_point)
}
/// Returns a new span representing the next character after the end-point
/// of this span
pub fn next_point(&self, sp: Span) -> Span {
let start_of_next_point = sp.hi().0;
let width = self.find_width_of_character_at_span(sp, true);
// If the width is 1, then the next span should point to the same `lo` and `hi`.
// However, in the case of a multibyte character, where the width != 1,
// the next span should span multiple bytes to include the whole
// character.
let end_of_next_point = start_of_next_point
.checked_add(width - 1)
.unwrap_or(start_of_next_point);
let end_of_next_point = BytePos(cmp::max(sp.lo().0 + 1, end_of_next_point));
Span::new(BytePos(start_of_next_point), end_of_next_point, sp.ctxt())
}
/// Finds the width of a character, either before or after the provided
/// span.
fn find_width_of_character_at_span(&self, sp: Span, forwards: bool) -> u32 {
// Disregard malformed spans and assume a one-byte wide character.
if sp.lo() >= sp.hi() {
debug!("find_width_of_character_at_span: early return malformed span");
return 1;
}
let local_begin = self.lookup_byte_offset(sp.lo());
let local_end = self.lookup_byte_offset(sp.hi());
debug!(
"find_width_of_character_at_span: local_begin=`{:?}`, local_end=`{:?}`",
local_begin, local_end
);
let start_index = local_begin.pos.to_usize();
let end_index = local_end.pos.to_usize();
debug!(
"find_width_of_character_at_span: start_index=`{:?}`, end_index=`{:?}`",
start_index, end_index
);
// Disregard indexes that are at the start or end of their spans, they can't fit
// bigger characters.
if (!forwards && end_index == usize::min_value())
|| (forwards && start_index == usize::max_value())
{
debug!("find_width_of_character_at_span: start or end of span, cannot be multibyte");
return 1;
}
let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
debug!(
"find_width_of_character_at_span: source_len=`{:?}`",
source_len
);
// Ensure indexes are also not malformed.
if start_index > end_index || end_index > source_len {
debug!("find_width_of_character_at_span: source indexes are malformed");
return 1;
}
// We need to extend the snippet to the end of the src rather than to end_index
// so when searching forwards for boundaries we've got somewhere to
// search.
let src = &local_begin.sf.src;
let snippet = {
let len = src.len();
&src[start_index..len]
};
debug!("find_width_of_character_at_span: snippet=`{:?}`", snippet);
let mut target = if forwards {
end_index + 1
} else {
end_index - 1
};
debug!(
"find_width_of_character_at_span: initial target=`{:?}`",
target
);
while !snippet.is_char_boundary(target - start_index) && target < source_len {
target = if forwards {
target + 1
} else {
match target.checked_sub(1) {
Some(target) => target,
None => {
break;
}
}
};
debug!("find_width_of_character_at_span: target=`{:?}`", target);
}
debug!(
"find_width_of_character_at_span: final target=`{:?}`",
target
);
if forwards {
(target - end_index) as u32
} else {
(end_index - target) as u32
}
}
pub fn get_source_file(&self, filename: &FileName) -> Option<Lrc<SourceFile>> {
for sf in self.files.borrow().source_files.iter() {
if *filename == sf.name {
return Some(sf.clone());
}
}
None
}
/// For a global BytePos compute the local offset within the containing
/// SourceFile
pub fn lookup_byte_offset(&self, bpos: BytePos) -> SourceFileAndBytePos {
let sf = self.lookup_source_file(bpos);
let offset = bpos - sf.start_pos;
SourceFileAndBytePos { sf, pos: offset }
}
/// Converts an absolute BytePos to a CharPos relative to the source_file.
fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let map = self.lookup_source_file(bpos);
self.bytepos_to_file_charpos_with(&map, bpos)
}
fn bytepos_to_file_charpos_with(&self, map: &SourceFile, bpos: BytePos) -> CharPos {
let total_extra_bytes = self.calc_extra_bytes(map, &mut 0, bpos);
assert!(
map.start_pos.to_u32() + total_extra_bytes <= bpos.to_u32(),
"map.start_pos = {:?}; total_extra_bytes = {}; bpos = {:?}",
map.start_pos,
total_extra_bytes,
bpos,
);
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes as usize)
}
/// Computes the number of extra bytes contributed by multibyte chars
/// located before `bpos` in `map`.
fn calc_extra_bytes(&self, map: &SourceFile, start: &mut usize, bpos: BytePos) -> u32 {
// The number of extra bytes due to multibyte chars in the SourceFile
let mut total_extra_bytes = 0;
for (i, &mbc) in map.multibyte_chars[*start..].iter().enumerate() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes as u32 - 1;
// We should never see a byte position in the middle of a
// character
debug_assert!(bpos.to_u32() >= mbc.pos.to_u32() + mbc.bytes as u32);
} else {
*start += i;
break;
}
}
total_extra_bytes
}
/// Return the source_file (in `files`) which contains `pos`, if any.
///
/// This method exists only for optimization; it is not part of the
/// public API.
#[doc(hidden)]
pub fn lookup_source_file_in(
files: &[Lrc<SourceFile>],
pos: BytePos,
) -> Option<Lrc<SourceFile>> {
let count = files.len();
// Binary search for the source_file.
let mut a = 0;
let mut b = count;
while b - a > 1 {
let m = (a + b) / 2;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
if a >= count {
return None;
}
Some(files[a].clone())
}
/// Return the source_file (in `self.files`) which contains `pos`.
///
/// This is not a public API.
#[doc(hidden)]
pub fn lookup_source_file(&self, pos: BytePos) -> Lrc<SourceFile> {
let files = self.files.borrow();
let files = &files.source_files;
let fm = Self::lookup_source_file_in(&files, pos);
match fm {
Some(fm) => fm,
None => {
panic!(
"position {} does not resolve to a source location",
pos.to_usize()
);
}
}
}
pub fn count_lines(&self) -> usize {
self.files().iter().fold(0, |a, f| a + f.count_lines())
}
pub fn generate_fn_name_span(&self, span: Span) -> Option<Span> {
let prev_span = self.span_extend_to_prev_str(span, "fn", true);
self.span_to_snippet(prev_span)
.map(|snippet| {
let len = snippet
.find(|c: char| !c.is_alphanumeric() && c != '_')
.expect("no label after fn");
prev_span.with_hi(BytePos(prev_span.lo().0 + len as u32))
})
.ok()
}
/// Take the span of a type parameter in a function signature and try to
/// generate a span for the function name (with generics) and a new
/// snippet for this span with the pointed type parameter as a new local
/// type parameter.
///
/// For instance:
/// ```rust,ignore (pseudo-Rust)
/// // Given span
/// fn my_function(param: T)
/// // ^ Original span
///
/// // Result
/// fn my_function(param: T)
/// // ^^^^^^^^^^^ Generated span with snippet `my_function<T>`
/// ```
///
/// Attention: The method used is very fragile since it essentially
/// duplicates the work of the parser. If you need to use this function
/// or something similar, please consider updating the source_map
/// functions and this function to something more robust.
pub fn generate_local_type_param_snippet(&self, span: Span) -> Option<(Span, String)> {
// Try to extend the span to the previous "fn" keyword to retrieve the function
// signature
let sugg_span = self.span_extend_to_prev_str(span, "fn", false);
if sugg_span != span {
if let Ok(snippet) = self.span_to_snippet(sugg_span) {
// Consume the function name
let mut offset = snippet
.find(|c: char| !c.is_alphanumeric() && c != '_')
.expect("no label after fn");
// Consume the generics part of the function signature
let mut bracket_counter = 0;
let mut last_char = None;
for c in snippet[offset..].chars() {
match c {
'<' => bracket_counter += 1,
'>' => bracket_counter -= 1,
'(' => {
if bracket_counter == 0 {
break;
}
}
_ => {}
}
offset += c.len_utf8();
last_char = Some(c);
}
// Adjust the suggestion span to encompass the function name with its generics
let sugg_span = sugg_span.with_hi(BytePos(sugg_span.lo().0 + offset as u32));
// Prepare the new suggested snippet to append the type parameter that triggered
// the error in the generics of the function signature
let mut new_snippet = if last_char == Some('>') {
format!("{}, ", &snippet[..(offset - '>'.len_utf8())])
} else {
format!("{}<", &snippet[..offset])
};
new_snippet.push_str(
&self
.span_to_snippet(span)
.unwrap_or_else(|_| "T".to_string()),
);
new_snippet.push('>');
return Some((sugg_span, new_snippet));
}
}
None
}
/// Creates a source map from the collected mappings.
#[cfg(feature = "sourcemap")]
pub fn build_source_map(&self, mappings: &mut Vec<(BytePos, LineCol)>) -> sourcemap::SourceMap {
self.build_source_map_from(mappings, None)
}
/// Creates a source map, optionally merging with an original input source map.
#[cfg(feature = "sourcemap")]
pub fn build_source_map_from(
&self,
mappings: &mut Vec<(BytePos, LineCol)>,
orig: Option<&sourcemap::SourceMap>,
) -> sourcemap::SourceMap {
self.build_source_map_with_config(mappings, orig, DefaultSourceMapGenConfig)
}
#[cfg(feature = "sourcemap")]
pub fn build_source_map_with_config(
&self,
mappings: &mut Vec<(BytePos, LineCol)>,
orig: Option<&sourcemap::SourceMap>,
config: impl SourceMapGenConfig,
) -> sourcemap::SourceMap {
let mut builder = SourceMapBuilder::new(None);
let mut src_id = 0u32;
if let Some(orig) = orig {
for src in orig.sources() {
let idx = builder.add_source(src);
src_id = idx as u32 + 1;
}
for (idx, contents) in orig.source_contents().enumerate() {
builder.set_source_contents(idx as _, contents);
}
}
// // This method is optimized based on the fact that mapping is sorted.
// mappings.sort_by_key(|v| v.0);
let mut cur_file: Option<Lrc<SourceFile>> = None;
let mut ch_start = 0;
let mut line_ch_start = 0;
for (pos, lc) in mappings.iter() {
let pos = *pos;
let lc = *lc;
// TODO: Use correct algorithm
if pos >= BytePos(4294967295) {
continue;
}
let f;
let f = match cur_file {
Some(ref f) if f.start_pos <= pos && pos < f.end_pos => f,
_ => {
f = self.lookup_source_file(pos);
src_id = builder.add_source(&config.file_name_to_source(&f.name));
builder.set_source_contents(src_id, Some(&f.src));
cur_file = Some(f.clone());
ch_start = 0;
line_ch_start = 0;
&f
}
};
let a = match f.lookup_line(pos) {
Some(line) => line as u32,
None => continue,
};
let mut line = a + 1; // Line numbers start at 1
let linebpos = f.lines[a as usize];
debug_assert!(
pos >= linebpos,
"{}: bpos = {:?}; linebpos = {:?};",
f.name,
pos,
linebpos,
);
let chpos = pos.to_u32() - self.calc_extra_bytes(&f, &mut ch_start, pos);
let linechpos =
linebpos.to_u32() - self.calc_extra_bytes(&f, &mut line_ch_start, linebpos);
let mut col = max(chpos, linechpos) - min(chpos, linechpos);
if let Some(orig) = &orig {
if let Some(token) = orig.lookup_token(line, col) {
line = token.get_src_line() + 1;
col = token.get_src_col();
if let Some(src) = token.get_source() {
src_id = builder.add_source(src);
}
}
}
builder.add_raw(lc.line, lc.col, line - 1, col, Some(src_id), None);
}
builder.into_sourcemap()
}
}
impl SourceMapper for SourceMap {
fn lookup_char_pos(&self, pos: BytePos) -> Loc {
self.lookup_char_pos(pos)
}
fn span_to_lines(&self, sp: Span) -> FileLinesResult {
self.span_to_lines(sp)
}
fn span_to_string(&self, sp: Span) -> String {
self.span_to_string(sp)
}
fn span_to_filename(&self, sp: Span) -> FileName {
self.span_to_filename(sp)
}
fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
self.merge_spans(sp_lhs, sp_rhs)
}
fn call_span_if_macro(&self, sp: Span) -> Span {
sp
}
fn doctest_offset_line(&self, line: usize) -> usize {
self.doctest_offset_line(line)
}
}
#[derive(Clone)]
pub struct FilePathMapping {
mapping: Vec<(PathBuf, PathBuf)>,
}
impl FilePathMapping {
pub fn empty() -> FilePathMapping {
FilePathMapping { mapping: vec![] }
}
pub fn new(mapping: Vec<(PathBuf, PathBuf)>) -> FilePathMapping {
FilePathMapping { mapping }
}
/// Applies any path prefix substitution as defined by the mapping.
/// The return value is the remapped path and a boolean indicating whether
/// the path was affected by the mapping.
pub fn map_prefix(&self, path: PathBuf) -> (PathBuf, bool) {
// NOTE: We are iterating over the mapping entries from last to first
// because entries specified later on the command line should
// take precedence.
for &(ref from, ref to) in self.mapping.iter().rev() {
if let Ok(rest) = path.strip_prefix(from) {
return (to.join(rest), true);
}
}
(path, false)
}
}
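// Illustrative sketch (added): later entries take precedence because
// `map_prefix` iterates the mapping in reverse.
//
//   let m = FilePathMapping::new(vec![
//       (PathBuf::from("/src"), PathBuf::from("/a")),
//       (PathBuf::from("/src"), PathBuf::from("/b")),
//   ]);
//   assert_eq!(m.map_prefix(PathBuf::from("/src/lib.rs")),
//              (PathBuf::from("/b/lib.rs"), true));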
pub trait SourceMapGenConfig {
/// # Returns
///
/// File path to be used in `SourceMap.sources`.
///
/// This should **not** return content of the file.
fn file_name_to_source(&self, f: &FileName) -> String;
}
#[derive(Debug, Clone)]
pub struct DefaultSourceMapGenConfig;
macro_rules! impl_ref {
($TP:ident, $T:ty) => {
impl<$TP> SourceMapGenConfig for $T
where
$TP: SourceMapGenConfig,
{
fn file_name_to_source(&self, f: &FileName) -> String {
(**self).file_name_to_source(f)
}
}
};
}
impl_ref!(T, &'_ T);
impl_ref!(T, Box<T>);
impl_ref!(T, std::rc::Rc<T>);
impl_ref!(T, std::sync::Arc<T>);
impl SourceMapGenConfig for DefaultSourceMapGenConfig {
fn file_name_to_source(&self, f: &FileName) -> String {
f.to_string()
}
}
// _____________________________________________________________________________
// Tests
//
#[cfg(test)]
mod tests {
use super::*;
use crate::sync::Lrc;
fn init_source_map() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
sm.new_source_file(
PathBuf::from("blork.rs").into(),
"first line.\nsecond line".to_string(),
);
sm.new_source_file(PathBuf::from("empty.rs").into(), String::new());
sm.new_source_file(
PathBuf::from("blork2.rs").into(),
"first line.\nsecond line".to_string(),
);
sm
}
#[test]
fn t3() {
// Test lookup_byte_offset
let sm = init_source_map();
let srcfbp1 = sm.lookup_byte_offset(BytePos(23));
assert_eq!(srcfbp1.sf.name, PathBuf::from("blork.rs").into());
assert_eq!(srcfbp1.pos, BytePos(23));
let srcfbp1 = sm.lookup_byte_offset(BytePos(24));
assert_eq!(srcfbp1.sf.name, PathBuf::from("empty.rs").into());
assert_eq!(srcfbp1.pos, BytePos(0));
let srcfbp2 = sm.lookup_byte_offset(BytePos(25));
assert_eq!(srcfbp2.sf.name, PathBuf::from("blork2.rs").into());
assert_eq!(srcfbp2.pos, BytePos(0));
}
#[test]
fn t4() {
// Test bytepos_to_file_charpos
let sm = init_source_map();
let cp1 = sm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
let cp2 = sm.bytepos_to_file_charpos(BytePos(25));
assert_eq!(cp2, CharPos(0));
}
#[test]
fn t5() {
// Test zero-length source_files.
let sm = init_source_map();
let loc1 = sm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, PathBuf::from("blork.rs").into());
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
let loc2 = sm.lookup_char_pos(BytePos(25));
assert_eq!(loc2.file.name, PathBuf::from("blork2.rs").into());
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
fn init_source_map_mbc() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
// € is a three byte utf8 char.
sm.new_source_file(
PathBuf::from("blork.rs").into(),
"fir€st €€€€ line.\nsecond line".to_string(),
);
sm.new_source_file(
PathBuf::from("blork2.rs").into(),
"first line€€.\n€ second line".to_string(),
);
sm
}
#[test]
fn t6() {
// Test bytepos_to_file_charpos in the presence of multi-byte chars
let sm = init_source_map_mbc();
let cp1 = sm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
let cp2 = sm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
let cp3 = sm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
let cp4 = sm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
#[test]
fn t7() {
// Test span_to_lines for a span ending at the end of source_file
let sm = init_source_map();
let span = Span::new(BytePos(12), BytePos(23), NO_EXPANSION);
let file_lines = sm.span_to_lines(span).unwrap();
assert_eq!(file_lines.file.name, PathBuf::from("blork.rs").into());
assert_eq!(file_lines.lines.len(), 1);
assert_eq!(file_lines.lines[0].line_index, 1);
}
    /// Given a string like " ~~~~~~~~~~~~ ", produces a span
    /// covering that range. The idea is that the string has the same
    /// length as the input, and we uncover the byte positions. Note
    /// that this can span lines and so on.
    fn span_from_selection(input: &str, selection: &str) -> Span {
        assert_eq!(input.len(), selection.len());
        let left_index = selection.find('~').unwrap() as u32;
        let right_index = selection.rfind('~').map(|x| x as u32).unwrap_or(left_index);
        Span::new(BytePos(left_index), BytePos(right_index + 1), NO_EXPANSION)
    }
    /// Test span_to_snippet and span_to_lines for a span covering 3
    /// lines in the middle of a file.
/// lines in the middle of a file.
#[test]
fn span_to_snippet_and_lines_spanning_multiple_lines() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "aaaaa\nbbbbBB\nCCC\nDDDDDddddd\neee\n";
let selection = " \n ~~\n~~~\n~~~~~ \n \n";
sm.new_source_file(
Path::new("blork.rs").to_owned().into(),
inputtext.to_string(),
);
let span = span_from_selection(inputtext, selection);
// check that we are extracting the text we thought we were extracting
assert_eq!(&sm.span_to_snippet(span).unwrap(), "BB\nCCC\nDDDDD");
// check that span_to_lines gives us the complete result with the lines/cols we
// expected
let lines = sm.span_to_lines(span).unwrap();
let expected = vec![
LineInfo {
line_index: 1,
start_col: CharPos(4),
end_col: CharPos(6),
},
LineInfo {
line_index: 2,
start_col: CharPos(0),
end_col: CharPos(3),
},
LineInfo {
line_index: 3,
start_col: CharPos(0),
end_col: CharPos(5),
},
];
assert_eq!(lines.lines, expected);
}
#[test]
fn t8() {
// Test span_to_snippet for a span ending at the end of source_file
let sm = init_source_map();
let span = Span::new(BytePos(12), BytePos(23), NO_EXPANSION);
let snippet = sm.span_to_snippet(span);
assert_eq!(snippet, Ok("second line".to_string()));
}
#[test]
fn t9() {
// Test span_to_str for a span ending at the end of source_file
let sm = init_source_map();
let span = Span::new(BytePos(12), BytePos(23), NO_EXPANSION);
let sstr = sm.span_to_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
#[test]
fn t10() {
// Test span_to_lines for a span of empty file
let sm = SourceMap::new(FilePathMapping::empty());
sm.new_source_file(PathBuf::from("blork.rs").into(), "".to_string());
let span = Span::new(BytePos(0), BytePos(0), NO_EXPANSION);
let file_lines = sm.span_to_lines(span).unwrap();
assert_eq!(file_lines.file.name, PathBuf::from("blork.rs").into());
assert_eq!(file_lines.lines.len(), 0);
}
/// Test failing to merge two spans on different lines
#[test]
fn span_merging_fail() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "bbbb BB\ncc CCC\n";
let selection1 = " ~~\n \n";
let selection2 = " \n ~~~\n";
sm.new_source_file(
Path::new("blork.rs").to_owned().into(),
inputtext.to_owned(),
);
let span1 = span_from_selection(inputtext, selection1);
let span2 = span_from_selection(inputtext, selection2);
assert!(sm.merge_spans(span1, span2).is_none());
}
/// Returns the span corresponding to the `n`th occurrence of
/// `substring` in `source_text`.
trait SourceMapExtension {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span;
}
impl SourceMapExtension for SourceMap {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span {
let mut i = 0;
let mut hi = 0;
loop {
let offset = source_text[hi..].find(substring).unwrap_or_else(|| {
panic!(
"source_text `{}` does not have {} occurrences of `{}`, only {}",
source_text, n, substring, i
);
});
let lo = hi + offset;
hi = lo + substring.len();
if i == n {
let span = Span::new(
BytePos(lo as u32 + file.start_pos.0),
BytePos(hi as u32 + file.start_pos.0),
NO_EXPANSION,
);
assert_eq!(&self.span_to_snippet(span).unwrap()[..], substring);
return span;
}
i += 1;
}
}
}
}
postprocess_decoded_seq.py | def | (answers):
"""
Corrects for some extra spaces that are created by the decode method
of the tokenizer like in numerical strings
example: 1, 000, 000 --> 1,000,000
Args:
answers: list[str]
Returns:
new_answers: list[str]
"""
new_answers = []
for answer in answers:
        # Pass 1: re-join comma-separated chunks, dropping the space only for
        # thousands-style groupings (digit on both sides, group of 3 digits).
        parts = answer.split(", ")
if len(parts) > 1:
try:
new0 = parts[0]
for i in range(1, len(parts)):
if new0[-1].isnumeric() and parts[i][0].isnumeric():
if len(parts[i]) > 3 and parts[i][3].isnumeric():
new0 = ", ".join([new0, parts[i]])
else:
new0 = ",".join([new0, parts[i]])
else:
new0 = ", ".join([new0, parts[i]])
except IndexError:
print("--> IndexError:", answer)
new0 = answer
else:
new0 = answer
        # Pass 2: re-join period-separated chunks when digits surround the period.
        parts = new0.split(". ")
if len(parts) > 1:
new1 = parts[0]
for i in range(1, len(parts)):
try:
if new1[-1].isnumeric() and parts[i][0].isnumeric():
new1 = ".".join([new1, parts[i]])
else:
new1 = ". ".join([new1, parts[i]])
except IndexError:
new1 = parts[1]
else:
new1 = new0
        # Pass 3: re-join colon-separated chunks when digits surround the colon.
        parts = new1.split(" : ")
if len(parts) > 1:
new2 = parts[0]
for i in range(1, len(parts)):
if new2[-1].isnumeric() and parts[i][0].isnumeric():
new2 = ":".join([new2, parts[i]])
else:
new2 = " : ".join([new2, parts[i]])
else:
new2 = new1
new_answers.append(new2)
    return new_answers
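# Illustrative sanity check of the behavior described in the docstring; the
# input strings below are hypothetical examples, not taken from any dataset:
#
#   >>> postprocess_decoded_seq(["1, 000, 000 tokens", "about 2. 5 units", "odds of 3 : 1"])
#   ['1,000,000 tokens', 'about 2.5 units', 'odds of 3:1']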
taskrun_test.go | /*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package taskrun
import (
"context"
"errors"
"fmt"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
podconvert "github.com/tektoncd/pipeline/pkg/pod"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/cloudevent"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
"github.com/tektoncd/pipeline/pkg/system"
tb "github.com/tektoncd/pipeline/test/builder"
"github.com/tektoncd/pipeline/test/names"
test "github.com/tektoncd/pipeline/test/v1alpha1"
corev1 "k8s.io/api/core/v1"
k8sapierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
k8sruntimeschema "k8s.io/apimachinery/pkg/runtime/schema"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
ktesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"knative.dev/pkg/apis"
duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"
"knative.dev/pkg/configmap"
)
const (
entrypointLocation = "/tekton/tools/entrypoint"
taskNameLabelKey = pipeline.GroupName + pipeline.TaskLabelKey
clusterTaskNameLabelKey = pipeline.GroupName + pipeline.ClusterTaskLabelKey
taskRunNameLabelKey = pipeline.GroupName + pipeline.TaskRunLabelKey
workspaceDir = "/workspace"
currentAPIVersion = "tekton.dev/v1alpha1"
)
var (
images = pipeline.Images{
EntrypointImage: "override-with-entrypoint:latest",
NopImage: "tianon/true",
GitImage: "override-with-git:latest",
CredsImage: "override-with-creds:latest",
KubeconfigWriterImage: "override-with-kubeconfig-writer:latest",
ShellImage: "busybox",
GsutilImage: "google/cloud-sdk",
BuildGCSFetcherImage: "gcr.io/cloud-builders/gcs-fetcher:latest",
PRImage: "override-with-pr:latest",
ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest",
}
ignoreLastTransitionTime = cmpopts.IgnoreTypes(apis.Condition{}.LastTransitionTime.Inner.Time)
// Pods are created with a random 5-character suffix that we want to
// ignore in our diffs.
ignoreRandomPodNameSuffix = cmp.FilterPath(func(path cmp.Path) bool {
return path.GoString() == "{v1.ObjectMeta}.Name"
}, cmp.Comparer(func(name1, name2 string) bool {
return name1[:len(name1)-5] == name2[:len(name2)-5]
}))
resourceQuantityCmp = cmp.Comparer(func(x, y resource.Quantity) bool {
return x.Cmp(y) == 0
})
cloudEventTarget1 = "https://foo"
cloudEventTarget2 = "https://bar"
simpleStep = tb.Step("foo", tb.StepName("simple-step"), tb.StepCommand("/mycmd"))
simpleTask = tb.Task("test-task", tb.TaskSpec(simpleStep), tb.TaskNamespace("foo"))
taskMultipleSteps = tb.Task("test-task-multi-steps", tb.TaskSpec(
tb.Step("foo", tb.StepName("z-step"),
tb.StepCommand("/mycmd"),
),
tb.Step("foo", tb.StepName("v-step"),
tb.StepCommand("/mycmd"),
),
tb.Step("foo", tb.StepName("x-step"),
tb.StepCommand("/mycmd"),
),
), tb.TaskNamespace("foo"))
clustertask = tb.ClusterTask("test-cluster-task", tb.ClusterTaskSpec(simpleStep))
taskSidecar = tb.Task("test-task-sidecar", tb.TaskSpec(
tb.Sidecar("sidecar", "image-id"),
), tb.TaskNamespace("foo"))
taskMultipleSidecars = tb.Task("test-task-sidecar", tb.TaskSpec(
tb.Sidecar("sidecar", "image-id"),
tb.Sidecar("sidecar2", "image-id"),
), tb.TaskNamespace("foo"))
outputTask = tb.Task("test-output-task", tb.TaskSpec(
simpleStep, tb.TaskInputs(
tb.InputsResource(gitResource.Name, v1alpha1.PipelineResourceTypeGit),
tb.InputsResource(anotherGitResource.Name, v1alpha1.PipelineResourceTypeGit),
),
tb.TaskOutputs(tb.OutputsResource(gitResource.Name, v1alpha1.PipelineResourceTypeGit)),
))
saTask = tb.Task("test-with-sa", tb.TaskSpec(tb.Step("foo", tb.StepName("sa-step"), tb.StepCommand("/mycmd"))), tb.TaskNamespace("foo"))
templatedTask = tb.Task("test-task-with-substitution", tb.TaskSpec(
tb.TaskInputs(
tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit),
tb.InputsParamSpec("myarg", v1alpha1.ParamTypeString), tb.InputsParamSpec("myarghasdefault", v1alpha1.ParamTypeString, tb.ParamSpecDefault("dont see me")),
tb.InputsParamSpec("myarghasdefault2", v1alpha1.ParamTypeString, tb.ParamSpecDefault("thedefault")),
tb.InputsParamSpec("configmapname", v1alpha1.ParamTypeString),
),
tb.TaskOutputs(tb.OutputsResource("myimage", v1alpha1.PipelineResourceTypeImage)),
tb.Step("myimage", tb.StepName("mycontainer"), tb.StepCommand("/mycmd"), tb.StepArgs(
"--my-arg=$(inputs.params.myarg)",
"--my-arg-with-default=$(inputs.params.myarghasdefault)",
"--my-arg-with-default2=$(inputs.params.myarghasdefault2)",
"--my-additional-arg=$(outputs.resources.myimage.url)",
)),
tb.Step("myotherimage", tb.StepName("myothercontainer"), tb.StepCommand("/mycmd"), tb.StepArgs(
"--my-other-arg=$(inputs.resources.workspace.url)",
)),
tb.TaskVolume("volume-configmap", tb.VolumeSource(corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "$(inputs.params.configmapname)",
},
},
})),
), tb.TaskNamespace("foo"))
twoOutputsTask = tb.Task("test-two-output-task", tb.TaskSpec(
simpleStep, tb.TaskOutputs(
tb.OutputsResource(cloudEventResource.Name, v1alpha1.PipelineResourceTypeCloudEvent),
tb.OutputsResource(anotherCloudEventResource.Name, v1alpha1.PipelineResourceTypeCloudEvent),
),
), tb.TaskNamespace("foo"))
gitResource = tb.PipelineResource("git-resource", tb.PipelineResourceNamespace("foo"), tb.PipelineResourceSpec(
v1alpha1.PipelineResourceTypeGit, tb.PipelineResourceSpecParam("URL", "https://foo.git"),
))
anotherGitResource = tb.PipelineResource("another-git-resource", tb.PipelineResourceNamespace("foo"), tb.PipelineResourceSpec(
v1alpha1.PipelineResourceTypeGit, tb.PipelineResourceSpecParam("URL", "https://foobar.git"),
))
imageResource = tb.PipelineResource("image-resource", tb.PipelineResourceNamespace("foo"), tb.PipelineResourceSpec(
v1alpha1.PipelineResourceTypeImage, tb.PipelineResourceSpecParam("URL", "gcr.io/kristoff/sven"),
))
cloudEventResource = tb.PipelineResource("cloud-event-resource", tb.PipelineResourceNamespace("foo"), tb.PipelineResourceSpec(
v1alpha1.PipelineResourceTypeCloudEvent, tb.PipelineResourceSpecParam("TargetURI", cloudEventTarget1),
))
anotherCloudEventResource = tb.PipelineResource("another-cloud-event-resource", tb.PipelineResourceNamespace("foo"), tb.PipelineResourceSpec(
v1alpha1.PipelineResourceTypeCloudEvent, tb.PipelineResourceSpecParam("TargetURI", cloudEventTarget2),
))
toolsVolume = corev1.Volume{
Name: "tekton-internal-tools",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
}
workspaceVolume = corev1.Volume{
Name: "tekton-internal-workspace",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
}
homeVolume = corev1.Volume{
Name: "tekton-internal-home",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
}
resultsVolume = corev1.Volume{
Name: "tekton-internal-results",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
}
downwardVolume = corev1.Volume{
Name: "tekton-internal-downward",
VolumeSource: corev1.VolumeSource{
DownwardAPI: &corev1.DownwardAPIVolumeSource{
Items: []corev1.DownwardAPIVolumeFile{{
Path: "ready",
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.annotations['tekton.dev/ready']",
},
}},
},
},
}
getMkdirResourceContainer = func(name, dir, suffix string, ops ...tb.ContainerOp) tb.PodSpecOp {
actualOps := []tb.ContainerOp{
tb.Command("/tekton/tools/entrypoint"),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"mkdir",
"--",
"-p",
dir),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
}
actualOps = append(actualOps, ops...)
return tb.PodContainer(fmt.Sprintf("step-create-dir-%s-%s", name, suffix), "busybox", actualOps...)
}
getPlaceToolsInitContainer = func(ops ...tb.ContainerOp) tb.PodSpecOp {
actualOps := []tb.ContainerOp{
tb.Command("cp", "/ko-app/entrypoint", entrypointLocation),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.Args(),
}
actualOps = append(actualOps, ops...)
return tb.PodInitContainer("place-tools", "override-with-entrypoint:latest", actualOps...)
}
)
func getRunName(tr *v1alpha1.TaskRun) string {
return strings.Join([]string{tr.Namespace, tr.Name}, "/")
}
// getTaskRunController returns an instance of the TaskRun controller/reconciler that has been seeded with
// d, where d represents the state of the system (existing resources) needed for the test.
func getTaskRunController(t *testing.T, d test.Data) (test.Assets, func()) {
//unregisterMetrics()
ctx, _ := ttesting.SetupFakeContext(t)
ctx, cancel := context.WithCancel(ctx)
cloudEventClientBehaviour := cloudevent.FakeClientBehaviour{
SendSuccessfully: true,
}
ctx = cloudevent.WithClient(ctx, &cloudEventClientBehaviour)
c, _ := test.SeedTestData(t, ctx, d)
configMapWatcher := configmap.NewInformedWatcher(c.Kube, system.GetNamespace())
return test.Assets{
Controller: NewController(images)(ctx, configMapWatcher),
Clients: c,
}, cancel
}
func checkEvents(fr *record.FakeRecorder, testName string, wantEvents []string) error {
	// The fake recorder runs in a goroutine, so the timeout is here to avoid
	// waiting on the channel forever if fewer events than expected are received.
	// We only hit the timeout when a test is about to fail, so its exact value
	// is not significant.
timer := time.NewTimer(1 * time.Second)
foundEvents := []string{}
for ii := 0; ii < len(wantEvents)+1; ii++ {
// We loop over all the events that we expect. Once they are all received
// we exit the loop. If we never receive enough events, the timeout takes us
// out of the loop.
select {
case event := <-fr.Events:
foundEvents = append(foundEvents, event)
			if ii > len(wantEvents)-1 {
				return fmt.Errorf("Received extra event \"%s\" for %s but no more events were expected", event, testName)
			}
wantEvent := wantEvents[ii]
			if !strings.HasPrefix(event, wantEvent) {
return fmt.Errorf("Expected event \"%s\" but got \"%s\" instead for %s", wantEvent, event, testName)
}
		case <-timer.C:
			// The timer fires only once; if we are still short of events at this
			// point, report the mismatch instead of blocking on the next iteration.
			if len(foundEvents) != len(wantEvents) {
				return fmt.Errorf("Received %d events for %s but %d expected. Found events: %#v", len(foundEvents), testName, len(wantEvents), foundEvents)
			}
			return nil
}
}
return nil
}
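// Illustrative usage of checkEvents (values mirror the tests below; event
// strings are treated as prefixes, so trailing detail may be omitted):
//
//	wantEvents := []string{
//		"Normal Started ",
//		"Normal Running Not all Steps",
//	}
//	if err := checkEvents(fr, t.Name(), wantEvents); err != nil {
//		t.Errorf(err.Error())
//	}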
func TestReconcile_ExplicitDefaultSA(t *testing.T) {
taskRunSuccess := tb.TaskRun("test-taskrun-run-success", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name, tb.TaskRefAPIVersion("a1")),
))
taskRunWithSaSuccess := tb.TaskRun("test-taskrun-with-sa-run-success", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(saTask.Name, tb.TaskRefAPIVersion("a1")),
tb.TaskRunServiceAccountName("test-sa"),
))
taskruns := []*v1alpha1.TaskRun{taskRunSuccess, taskRunWithSaSuccess}
d := test.Data{
TaskRuns: taskruns,
Tasks: []*v1alpha1.Task{simpleTask, saTask},
}
defaultSAName := "pipelines"
defaultCfg := &config.Config{
Defaults: &config.Defaults{
DefaultServiceAccount: defaultSAName,
DefaultTimeoutMinutes: 60,
DefaultManagedByLabelValue: "tekton-pipelines",
},
}
for _, tc := range []struct {
name string
taskRun *v1alpha1.TaskRun
wantPod *corev1.Pod
}{{
name: "success",
taskRun: taskRunSuccess,
wantPod: tb.Pod("test-taskrun-run-success-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskNameLabelKey, "test-task"),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-run-success"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-run-success",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodServiceAccountName(defaultSAName),
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-simple-step", "foo",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/mycmd",
"--",
),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "serviceaccount",
taskRun: taskRunWithSaSuccess,
wantPod: tb.Pod("test-taskrun-with-sa-run-success-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskNameLabelKey, "test-with-sa"),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-sa-run-success"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-with-sa-run-success",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodServiceAccountName("test-sa"),
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-sa-step", "foo",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/mycmd",
"--",
),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}} {
t.Run(tc.name, func(t *testing.T) {
names.TestingSeed()
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
saName := tc.taskRun.Spec.ServiceAccountName
if saName == "" {
saName = defaultSAName
}
if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: saName,
Namespace: tc.taskRun.Namespace,
},
}); err != nil {
t.Fatal(err)
}
ctx := config.ToContext(context.Background(), defaultCfg)
if err := c.Reconciler.Reconcile(ctx, getRunName(tc.taskRun)); err != nil {
t.Errorf("expected no error. Got error %v", err)
}
if len(clients.Kube.Actions()) == 0 {
t.Errorf("Expected actions to be logged in the kubeclient, got none")
}
namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name)
if err != nil {
t.Errorf("Invalid resource key: %v", err)
}
tr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(namespace).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("getting updated taskrun: %v", err)
}
condition := tr.Status.GetCondition(apis.ConditionSucceeded)
if condition == nil || condition.Status != corev1.ConditionUnknown {
t.Errorf("Expected invalid TaskRun to have in progress status, but had %v", condition)
}
if condition != nil && condition.Reason != podconvert.ReasonRunning {
t.Errorf("Expected reason %q but was %s", podconvert.ReasonRunning, condition.Reason)
}
if tr.Status.PodName == "" {
t.Fatalf("Reconcile didn't set pod name")
}
pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to fetch build pod: %v", err)
}
if d := cmp.Diff(tc.wantPod.ObjectMeta, pod.ObjectMeta, ignoreRandomPodNameSuffix); d != "" {
t.Errorf("Pod metadata doesn't match (-want, +got): %s", d)
}
if d := cmp.Diff(tc.wantPod.Spec, pod.Spec, resourceQuantityCmp); d != "" {
t.Errorf("Pod spec doesn't match, (-want, +got): %s", d)
}
if len(clients.Kube.Actions()) == 0 {
t.Fatalf("Expected actions to be logged in the kubeclient, got none")
}
})
}
}
func TestReconcile(t *testing.T) {
taskRunSuccess := tb.TaskRun("test-taskrun-run-success", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name, tb.TaskRefAPIVersion("a1")),
))
taskRunWithSaSuccess := tb.TaskRun("test-taskrun-with-sa-run-success", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(saTask.Name, tb.TaskRefAPIVersion("a1")), tb.TaskRunServiceAccountName("test-sa"),
))
taskRunSubstitution := tb.TaskRun("test-taskrun-substitution", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(templatedTask.Name, tb.TaskRefAPIVersion("a1")),
tb.TaskRunInputs(
tb.TaskRunInputsParam("myarg", "foo"),
tb.TaskRunInputsParam("myarghasdefault", "bar"),
tb.TaskRunInputsParam("configmapname", "configbar"),
tb.TaskRunInputsResource("workspace", tb.TaskResourceBindingRef(gitResource.Name)),
),
tb.TaskRunOutputs(tb.TaskRunOutputsResource("myimage", tb.TaskResourceBindingRef("image-resource"))),
))
taskRunInputOutput := tb.TaskRun("test-taskrun-input-output",
tb.TaskRunNamespace("foo"),
tb.TaskRunOwnerReference("PipelineRun", "test"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(outputTask.Name),
tb.TaskRunInputs(
tb.TaskRunInputsResource(gitResource.Name,
tb.TaskResourceBindingRef(gitResource.Name),
tb.TaskResourceBindingPaths("source-folder"),
),
tb.TaskRunInputsResource(anotherGitResource.Name,
tb.TaskResourceBindingRef(anotherGitResource.Name),
tb.TaskResourceBindingPaths("source-folder"),
),
),
tb.TaskRunOutputs(
tb.TaskRunOutputsResource(gitResource.Name,
tb.TaskResourceBindingRef(gitResource.Name),
tb.TaskResourceBindingPaths("output-folder"),
),
),
),
)
taskRunWithTaskSpec := tb.TaskRun("test-taskrun-with-taskspec", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunInputs(
tb.TaskRunInputsParam("myarg", "foo"),
tb.TaskRunInputsResource("workspace", tb.TaskResourceBindingRef(gitResource.Name)),
),
tb.TaskRunTaskSpec(
tb.TaskInputs(
tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit),
tb.InputsParamSpec("myarg", v1alpha1.ParamTypeString, tb.ParamSpecDefault("mydefault")),
),
tb.Step("myimage", tb.StepName("mycontainer"), tb.StepCommand("/mycmd"),
tb.StepArgs("--my-arg=$(inputs.params.myarg)"),
),
),
))
taskRunWithResourceSpecAndTaskSpec := tb.TaskRun("test-taskrun-with-resource-spec", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunInputs(
tb.TaskRunInputsResource("workspace", tb.TaskResourceBindingResourceSpec(&v1alpha1.PipelineResourceSpec{
Type: v1alpha1.PipelineResourceTypeGit,
Params: []v1alpha1.ResourceParam{{
Name: "URL",
Value: "github.com/foo/bar.git",
}, {
Name: "revision",
Value: "rel-can",
}},
})),
),
tb.TaskRunTaskSpec(
tb.TaskInputs(
tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit)),
tb.Step("ubuntu", tb.StepName("mystep"), tb.StepCommand("/mycmd")),
),
))
taskRunWithClusterTask := tb.TaskRun("test-taskrun-with-cluster-task",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(tb.TaskRunTaskRef(clustertask.Name, tb.TaskRefKind(v1alpha1.ClusterTaskKind))),
)
taskRunWithLabels := tb.TaskRun("test-taskrun-with-labels",
tb.TaskRunNamespace("foo"),
tb.TaskRunLabel("TaskRunLabel", "TaskRunValue"),
tb.TaskRunLabel(taskRunNameLabelKey, "WillNotBeUsed"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
),
)
taskRunWithAnnotations := tb.TaskRun("test-taskrun-with-annotations",
tb.TaskRunNamespace("foo"),
tb.TaskRunAnnotation("TaskRunAnnotation", "TaskRunValue"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
),
)
taskRunWithPod := tb.TaskRun("test-taskrun-with-pod",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(tb.TaskRunTaskRef(simpleTask.Name)),
tb.TaskRunStatus(tb.PodName("some-pod-abcdethat-no-longer-exists")),
)
taskRunWithCredentialsVariable := tb.TaskRun("test-taskrun-with-credentials-variable", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskSpec(
tb.Step("myimage", tb.StepName("mycontainer"), tb.StepCommand("/mycmd $(credentials.path)")),
),
))
taskruns := []*v1alpha1.TaskRun{
taskRunSuccess, taskRunWithSaSuccess,
taskRunSubstitution, taskRunInputOutput,
taskRunWithTaskSpec, taskRunWithClusterTask, taskRunWithResourceSpecAndTaskSpec,
taskRunWithLabels, taskRunWithAnnotations, taskRunWithPod,
taskRunWithCredentialsVariable,
}
d := test.Data{
TaskRuns: taskruns,
Tasks: []*v1alpha1.Task{simpleTask, saTask, templatedTask, outputTask},
ClusterTasks: []*v1alpha1.ClusterTask{clustertask},
PipelineResources: []*v1alpha1.PipelineResource{gitResource, anotherGitResource, imageResource},
}
for _, tc := range []struct {
name string
taskRun *v1alpha1.TaskRun
wantPod *corev1.Pod
wantEvents []string
}{{
name: "success",
taskRun: taskRunSuccess,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-run-success-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskNameLabelKey, "test-task"),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-run-success"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-run-success",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-simple-step", "foo",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/mycmd",
"--",
),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "serviceaccount",
taskRun: taskRunWithSaSuccess,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-with-sa-run-success-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskNameLabelKey, "test-with-sa"),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-sa-run-success"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-with-sa-run-success",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodServiceAccountName("test-sa"),
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-sa-step", "foo",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/mycmd",
"--",
),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "params",
taskRun: taskRunSubstitution,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-substitution-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskNameLabelKey, "test-task-with-substitution"),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-substitution"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-substitution",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodVolumes(
workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume,
corev1.Volume{
Name: "volume-configmap",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "configbar",
},
},
},
},
),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
getMkdirResourceContainer("myimage", "/workspace/output/myimage", "mssqb"),
tb.PodContainer("step-git-source-git-resource-mz4c7", "override-with-git:latest",
tb.Command(entrypointLocation),
tb.Args("-wait_file", "/tekton/tools/0", "-post_file", "/tekton/tools/1", "-termination_path",
"/tekton/termination", "-entrypoint", "/ko-app/git-init", "--", "-url", "https://foo.git",
"-revision", "master", "-path", "/workspace/workspace"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.EnvVar("TEKTON_RESOURCE_NAME", "git-resource"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
tb.PodContainer("step-mycontainer", "myimage",
tb.Command(entrypointLocation),
tb.Args("-wait_file", "/tekton/tools/1", "-post_file", "/tekton/tools/2", "-termination_path",
"/tekton/termination", "-entrypoint", "/mycmd", "--", "--my-arg=foo", "--my-arg-with-default=bar",
"--my-arg-with-default2=thedefault", "--my-additional-arg=gcr.io/kristoff/sven"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
tb.PodContainer("step-myothercontainer", "myotherimage",
tb.Command(entrypointLocation),
tb.Args("-wait_file", "/tekton/tools/2", "-post_file", "/tekton/tools/3", "-termination_path",
"/tekton/termination", "-entrypoint", "/mycmd", "--", "--my-other-arg=https://foo.git"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
tb.PodContainer("step-image-digest-exporter-9l9zj", "override-with-imagedigest-exporter-image:latest",
tb.Command(entrypointLocation),
tb.Args("-wait_file", "/tekton/tools/3", "-post_file", "/tekton/tools/4", "-termination_path",
"/tekton/termination", "-entrypoint", "/ko-app/imagedigestexporter", "--",
"-images", "[{\"name\":\"image-resource\",\"type\":\"image\",\"url\":\"gcr.io/kristoff/sven\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/myimage\"}]"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "taskrun-with-taskspec",
taskRun: taskRunWithTaskSpec,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-with-taskspec-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-taskspec"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-with-taskspec",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-git-source-git-resource-9l9zj", "override-with-git:latest",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/ko-app/git-init",
"--",
"-url",
"https://foo.git",
"-revision",
"master",
"-path",
"/workspace/workspace",
),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.EnvVar("TEKTON_RESOURCE_NAME", "git-resource"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
tb.PodContainer("step-mycontainer", "myimage",
tb.Command(entrypointLocation),
tb.WorkingDir(workspaceDir),
tb.Args("-wait_file", "/tekton/tools/0", "-post_file", "/tekton/tools/1", "-termination_path",
"/tekton/termination", "-entrypoint", "/mycmd", "--", "--my-arg=foo"),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "success-with-cluster-task",
taskRun: taskRunWithClusterTask,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-with-cluster-task-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskNameLabelKey, "test-cluster-task"),
tb.PodLabel(clusterTaskNameLabelKey, "test-cluster-task"),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-cluster-task"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-with-cluster-task",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-simple-step", "foo",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/mycmd",
"--",
),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "taskrun-with-resource-spec-task-spec",
taskRun: taskRunWithResourceSpecAndTaskSpec,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-with-resource-spec-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-resource-spec"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-with-resource-spec",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-git-source-workspace-9l9zj", "override-with-git:latest",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/ko-app/git-init",
"--",
"-url",
"github.com/foo/bar.git",
"-revision",
"rel-can",
"-path",
"/workspace/workspace"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.EnvVar("TEKTON_RESOURCE_NAME", "workspace"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
tb.PodContainer("step-mystep", "ubuntu",
tb.Command(entrypointLocation),
tb.Args("-wait_file", "/tekton/tools/0", "-post_file", "/tekton/tools/1", "-termination_path",
"/tekton/termination", "-entrypoint", "/mycmd", "--"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "taskrun-with-pod",
taskRun: taskRunWithPod,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-with-pod-pod-abcde",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskNameLabelKey, "test-task"),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-pod"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-with-pod",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-simple-step", "foo",
tb.Command(entrypointLocation),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/mycmd",
"--"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}, {
name: "taskrun-with-credentials-variable-default-tekton-home",
taskRun: taskRunWithCredentialsVariable,
wantEvents: []string{
"Normal Started ",
"Normal Running Not all Steps",
},
wantPod: tb.Pod("test-taskrun-with-credentials-variable-pod-9l9zj",
tb.PodNamespace("foo"),
tb.PodAnnotation(podconvert.ReleaseAnnotation, podconvert.ReleaseAnnotationValue),
tb.PodLabel(taskRunNameLabelKey, "test-taskrun-with-credentials-variable"),
tb.PodLabel("app.kubernetes.io/managed-by", "tekton-pipelines"),
tb.PodOwnerReference("TaskRun", "test-taskrun-with-credentials-variable",
tb.OwnerReferenceAPIVersion(currentAPIVersion)),
tb.PodSpec(
tb.PodVolumes(workspaceVolume, homeVolume, resultsVolume, toolsVolume, downwardVolume),
tb.PodRestartPolicy(corev1.RestartPolicyNever),
getPlaceToolsInitContainer(),
tb.PodContainer("step-mycontainer", "myimage",
tb.Command("/tekton/tools/entrypoint"),
tb.Args("-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
// Important bit here: /tekton/home
"/mycmd /tekton/home",
"--"),
tb.WorkingDir(workspaceDir),
tb.EnvVar("HOME", "/tekton/home"),
tb.VolumeMount("tekton-internal-tools", "/tekton/tools"),
tb.VolumeMount("tekton-internal-downward", "/tekton/downward"),
tb.VolumeMount("tekton-internal-workspace", workspaceDir),
tb.VolumeMount("tekton-internal-home", "/tekton/home"),
tb.VolumeMount("tekton-internal-results", "/tekton/results"),
tb.TerminationMessagePath("/tekton/termination"),
),
),
),
}} {
t.Run(tc.name, func(t *testing.T) {
names.TestingSeed()
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
saName := tc.taskRun.Spec.ServiceAccountName
if saName == "" {
saName = "default"
}
if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: saName,
Namespace: tc.taskRun.Namespace,
},
}); err != nil {
t.Fatal(err)
}
reconciler := c.Reconciler.(*Reconciler)
fr := reconciler.Recorder.(*record.FakeRecorder)
if err := reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil {
t.Errorf("expected no error. Got error %v", err)
}
if len(clients.Kube.Actions()) == 0 {
t.Errorf("Expected actions to be logged in the kubeclient, got none")
}
namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name)
if err != nil {
t.Errorf("Invalid resource key: %v", err)
}
tr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(namespace).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("getting updated taskrun: %v", err)
}
condition := tr.Status.GetCondition(apis.ConditionSucceeded)
if condition == nil || condition.Status != corev1.ConditionUnknown {
t.Errorf("Expected invalid TaskRun to have in progress status, but had %v", condition)
}
if condition != nil && condition.Reason != podconvert.ReasonRunning {
t.Errorf("Expected reason %q but was %s", podconvert.ReasonRunning, condition.Reason)
}
if tr.Status.PodName == "" {
t.Fatalf("Reconcile didn't set pod name")
}
pod, err := clients.Kube.CoreV1().Pods(tr.Namespace).Get(tr.Status.PodName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to fetch build pod: %v", err)
}
if d := cmp.Diff(tc.wantPod.ObjectMeta, pod.ObjectMeta, ignoreRandomPodNameSuffix); d != "" {
t.Errorf("Pod metadata doesn't match (-want, +got): %s", d)
}
pod.Name = tc.wantPod.Name // Ignore pod name differences, the pod name is generated and tested in pod_test.go
if d := cmp.Diff(tc.wantPod.Spec, pod.Spec, resourceQuantityCmp); d != "" {
t.Errorf("Pod spec doesn't match (-want, +got): %s", d)
}
if len(clients.Kube.Actions()) == 0 {
t.Fatalf("Expected actions to be logged in the kubeclient, got none")
}
err = checkEvents(fr, tc.name, tc.wantEvents)
			if err != nil {
				t.Errorf(err.Error())
			}
})
}
}
func TestReconcile_SetsStartTime(t *testing.T) {
taskRun := tb.TaskRun("test-taskrun", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
))
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{simpleTask},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
if taskRun.Status.StartTime == nil || taskRun.Status.StartTime.IsZero() {
t.Errorf("expected startTime to be set by reconcile but was %q", taskRun.Status.StartTime)
}
}
func TestReconcile_SortTaskRunStatusSteps(t *testing.T) {
taskRun := tb.TaskRun("test-taskrun", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(taskMultipleSteps.Name)),
tb.TaskRunStatus(
tb.PodName("the-pod"),
),
)
	// The order of the container statuses has been shuffled so that it no longer
	// aligns with the order of the spec steps of the Task. After Reconcile is
	// called, the status steps in the TaskRun should be sorted back into the same
	// order as the spec steps of the Task.
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{taskMultipleSteps},
Pods: []*corev1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "the-pod",
},
Status: corev1.PodStatus{
Phase: corev1.PodSucceeded,
ContainerStatuses: []corev1.ContainerStatus{{
Name: "step-nop",
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
}, {
Name: "step-x-step",
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
}, {
Name: "step-v-step",
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
}, {
Name: "step-z-step",
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
}},
},
}},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
verifyTaskRunStatusStep(t, taskRun)
}
func verifyTaskRunStatusStep(t *testing.T, taskRun *v1alpha1.TaskRun) {
actualStepOrder := []string{}
for _, state := range taskRun.Status.Steps {
actualStepOrder = append(actualStepOrder, state.Name)
}
expectedStepOrder := []string{}
for _, state := range taskMultipleSteps.Spec.Steps {
expectedStepOrder = append(expectedStepOrder, state.Name)
}
	// Add the nop step at the end. This may be removed in the future.
	expectedStepOrder = append(expectedStepOrder, "nop")
if d := cmp.Diff(expectedStepOrder, actualStepOrder); d != "" {
t.Errorf("The status steps in TaksRun doesn't match the spec steps in Task (-want, +got): %s", d)
}
}
func TestReconcile_DoesntChangeStartTime(t *testing.T) {
startTime := time.Date(2000, 1, 1, 1, 1, 1, 1, time.UTC)
taskRun := tb.TaskRun("test-taskrun", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name)),
tb.TaskRunStatus(
tb.TaskRunStartTime(startTime),
tb.PodName("the-pod"),
),
)
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{simpleTask},
Pods: []*corev1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "the-pod",
},
}},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
if taskRun.Status.StartTime.Time != startTime {
t.Errorf("expected startTime %q to be preserved by reconcile but was %q", startTime, taskRun.Status.StartTime)
}
}
func TestReconcileInvalidTaskRuns(t *testing.T) {
noTaskRun := tb.TaskRun("notaskrun", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(tb.TaskRunTaskRef("notask")))
withWrongRef := tb.TaskRun("taskrun-with-wrong-ref", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef("taskrun-with-wrong-ref", tb.TaskRefKind(v1alpha1.ClusterTaskKind)),
))
taskRuns := []*v1alpha1.TaskRun{noTaskRun, withWrongRef}
tasks := []*v1alpha1.Task{simpleTask}
d := test.Data{
TaskRuns: taskRuns,
Tasks: tasks,
}
testcases := []struct {
name string
taskRun *v1alpha1.TaskRun
reason string
wantEvents []string
}{{
name: "task run with no task",
taskRun: noTaskRun,
reason: podconvert.ReasonFailedResolution,
wantEvents: []string{
"Normal Started ",
"Warning Failed ",
},
}, {
name: "task run with wrong ref",
taskRun: withWrongRef,
reason: podconvert.ReasonFailedResolution,
wantEvents: []string{
"Normal Started ",
"Warning Failed ",
},
}}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
reconciler := c.Reconciler.(*Reconciler)
fr := reconciler.Recorder.(*record.FakeRecorder)
err := reconciler.Reconcile(context.Background(), getRunName(tc.taskRun))
// When a TaskRun is invalid and can't run, we don't want to return an error because
// an error will tell the Reconciler to keep trying to reconcile; instead we want to stop
// and forget about the Run.
if err != nil {
t.Errorf("Did not expect to see error when reconciling invalid TaskRun but saw %q", err)
}
// Check actions and events
actions := clients.Kube.Actions()
			if len(actions) != 1 || !actions[0].Matches("list", "namespaces") {
t.Errorf("expected one action (list namespaces) created by the reconciler, got %d. Actions: %#v", len(actions), actions)
}
err = checkEvents(fr, tc.name, tc.wantEvents)
			if err != nil {
				t.Errorf(err.Error())
			}
// Since the TaskRun is invalid, the status should say it has failed
condition := tc.taskRun.Status.GetCondition(apis.ConditionSucceeded)
if condition == nil || condition.Status != corev1.ConditionFalse {
t.Errorf("Expected invalid TaskRun to have failed status, but had %v", condition)
}
if condition != nil && condition.Reason != tc.reason {
t.Errorf("Expected failure to be because of reason %q but was %s", tc.reason, condition.Reason)
}
})
}
}
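// Sketch of the reconciler pattern exercised above (helper names here are
// hypothetical, not the controller's actual code): permanent validation
// failures mark the TaskRun failed and return nil so the key is not requeued,
// while transient errors are returned so the reconciler retries.
//
//	if err := resolveTaskRef(tr); err != nil {
//		tr.Status.MarkResourceFailed(podconvert.ReasonFailedResolution, err)
//		return nil // permanent failure: stop and forget about the Run
//	}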
func TestReconcilePodFetchError(t *testing.T) {
taskRun := tb.TaskRun("test-taskrun-run-success",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(tb.TaskRunTaskRef("test-task")),
tb.TaskRunStatus(tb.PodName("will-not-be-found")),
)
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{simpleTask},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
clients.Kube.PrependReactor("get", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.New("induce failure fetching pods")
})
if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err == nil {
t.Fatal("expected error when reconciling a Task for which we couldn't get the corresponding Pod but got nil")
}
}
func makePod(taskRun *v1alpha1.TaskRun, task *v1alpha1.Task) (*corev1.Pod, error) {
// TODO(jasonhall): This avoids a circular dependency where
// getTaskRunController takes a test.Data which must be populated with
// a pod created from MakePod which requires a (fake) Kube client. When
// we remove Build entirely from this controller, we should simply
// specify the Pod we want to exist directly, and not call MakePod from
// the build. This will break the cycle and allow us to simply use
// clients normally.
kubeclient := fakekubeclientset.NewSimpleClientset(&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: taskRun.Namespace,
},
})
entrypointCache, err := podconvert.NewEntrypointCache(kubeclient)
if err != nil {
return nil, err
}
return podconvert.MakePod(images, taskRun, task.Spec, kubeclient, entrypointCache, true)
}
func TestReconcilePodUpdateStatus(t *testing.T) {
taskRun := tb.TaskRun("test-taskrun-run-success", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(tb.TaskRunTaskRef("test-task")))
pod, err := makePod(taskRun, simpleTask)
if err != nil {
t.Fatalf("MakePod: %v", err)
}
taskRun.Status = v1alpha1.TaskRunStatus{
TaskRunStatusFields: v1alpha1.TaskRunStatusFields{
PodName: pod.Name,
},
}
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{simpleTask},
Pods: []*corev1.Pod{pod},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
reconciler := c.Reconciler.(*Reconciler)
fr := reconciler.Recorder.(*record.FakeRecorder)
if err := reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Fatalf("Unexpected error when Reconcile() : %v", err)
}
newTr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
}
if d := cmp.Diff(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
Reason: "Running",
Message: "Not all Steps in the Task have finished executing",
}, newTr.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" {
t.Fatalf("Did not get expected condition (-want, +got): %v", d)
}
	// Update the pod status and trigger reconcile: the build is completed.
pod.Status = corev1.PodStatus{
Phase: corev1.PodSucceeded,
}
if _, err := clients.Kube.CoreV1().Pods(taskRun.Namespace).UpdateStatus(pod); err != nil {
t.Errorf("Unexpected error while updating build: %v", err)
}
if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Fatalf("Unexpected error when Reconcile(): %v", err)
}
newTr, err = clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error fetching taskrun: %v", err)
}
if d := cmp.Diff(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
Reason: podconvert.ReasonSucceeded,
Message: "All Steps have completed executing",
}, newTr.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" {
t.Errorf("Did not get expected condition (-want, +got): %v", d)
}
wantEvents := []string{
"Normal Started ",
"Normal Running Not all Steps",
"Normal Succeeded",
}
err = checkEvents(fr, "test-reconcile-pod-updateStatus", wantEvents)
	if err != nil {
		t.Error(err)
	}
}
func TestReconcileOnCompletedTaskRun(t *testing.T) {
taskSt := &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
Reason: "Build succeeded",
Message: "Build succeeded",
}
taskRun := tb.TaskRun("test-taskrun-run-success", tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
), tb.TaskRunStatus(tb.StatusCondition(*taskSt)))
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{
taskRun,
},
Tasks: []*v1alpha1.Task{simpleTask},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
if err := c.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err)
}
newTr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
}
if d := cmp.Diff(taskSt, newTr.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" {
t.Fatalf("Did not get expected condition (-want, +got): %v", d)
}
}
func TestReconcileOnCancelledTaskRun(t *testing.T) {
taskRun := tb.TaskRun("test-taskrun-run-cancelled",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
tb.TaskRunCancelled,
), tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
})))
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{simpleTask},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
reconciler := c.Reconciler.(*Reconciler)
fr := reconciler.Recorder.(*record.FakeRecorder)
if err := reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err)
}
newTr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
}
expectedStatus := &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunCancelled",
Message: `TaskRun "test-taskrun-run-cancelled" was cancelled`,
}
if d := cmp.Diff(expectedStatus, newTr.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" {
t.Fatalf("Did not get expected condition (-want, +got): %v", d)
}
wantEvents := []string{
"Normal Started",
"Warning Failed TaskRun \"test-taskrun-run-cancelled\" was cancelled",
}
err = checkEvents(fr, "test-reconcile-on-cancelled-taskrun", wantEvents)
	if err != nil {
		t.Error(err)
	}
}
func TestReconcileTimeouts(t *testing.T) {
type testCase struct {
taskRun *v1alpha1.TaskRun
expectedStatus *apis.Condition
wantEvents []string
}
testcases := []testCase{
{
taskRun: tb.TaskRun("test-taskrun-timeout",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
tb.TaskRunTimeout(10*time.Second),
),
tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown}),
tb.TaskRunStartTime(time.Now().Add(-15*time.Second)))),
expectedStatus: &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunTimeout",
Message: `TaskRun "test-taskrun-timeout" failed to finish within "10s"`,
},
wantEvents: []string{
"Warning Failed ",
},
}, {
taskRun: tb.TaskRun("test-taskrun-default-timeout-60-minutes",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
),
tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown}),
tb.TaskRunStartTime(time.Now().Add(-61*time.Minute)))),
expectedStatus: &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunTimeout",
Message: `TaskRun "test-taskrun-default-timeout-60-minutes" failed to finish within "1h0m0s"`,
},
wantEvents: []string{
"Warning Failed ",
},
}, {
taskRun: tb.TaskRun("test-taskrun-nil-timeout-default-60-minutes",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
tb.TaskRunNilTimeout,
),
tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown}),
tb.TaskRunStartTime(time.Now().Add(-61*time.Minute)))),
expectedStatus: &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunTimeout",
Message: `TaskRun "test-taskrun-nil-timeout-default-60-minutes" failed to finish within "1h0m0s"`,
},
wantEvents: []string{
"Warning Failed ",
},
}}
for _, tc := range testcases {
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{tc.taskRun},
Tasks: []*v1alpha1.Task{simpleTask},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
reconciler := c.Reconciler.(*Reconciler)
fr := reconciler.Recorder.(*record.FakeRecorder)
if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil {
t.Fatalf("Unexpected error when reconciling completed TaskRun : %v", err)
}
newTr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(tc.taskRun.Namespace).Get(tc.taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", tc.taskRun.Name, err)
}
condition := newTr.Status.GetCondition(apis.ConditionSucceeded)
if d := cmp.Diff(tc.expectedStatus, condition, ignoreLastTransitionTime); d != "" {
t.Fatalf("Did not get expected condition (-want, +got): %v", d)
}
err = checkEvents(fr, tc.taskRun.Name, tc.wantEvents)
		if err != nil {
			t.Error(err)
		}
}
}
func TestHandlePodCreationError(t *testing.T) {
taskRun := tb.TaskRun("test-taskrun-pod-creation-failed", tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
), tb.TaskRunStatus(
tb.TaskRunStartTime(time.Now()),
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
}),
))
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{simpleTask},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c, ok := testAssets.Controller.Reconciler.(*Reconciler)
if !ok {
t.Errorf("failed to construct instance of taskrun reconciler")
return
}
// Prevent backoff timer from starting
c.timeoutHandler.SetTaskRunCallbackFunc(nil)
testcases := []struct {
description string
err error
expectedType apis.ConditionType
expectedStatus corev1.ConditionStatus
expectedReason string
}{{
description: "exceeded quota errors are surfaced in taskrun condition but do not fail taskrun",
err: k8sapierrors.NewForbidden(k8sruntimeschema.GroupResource{Group: "foo", Resource: "bar"}, "baz", errors.New("exceeded quota")),
expectedType: apis.ConditionSucceeded,
expectedStatus: corev1.ConditionUnknown,
expectedReason: podconvert.ReasonExceededResourceQuota,
}, {
description: "errors other than exceeded quota fail the taskrun",
err: errors.New("this is a fatal error"),
expectedType: apis.ConditionSucceeded,
expectedStatus: corev1.ConditionFalse,
expectedReason: podconvert.ReasonCouldntGetTask,
}}
for _, tc := range testcases {
t.Run(tc.description, func(t *testing.T) {
c.handlePodCreationError(taskRun, tc.err)
foundCondition := false
for _, cond := range taskRun.Status.Conditions {
if cond.Type == tc.expectedType && cond.Status == tc.expectedStatus && cond.Reason == tc.expectedReason {
foundCondition = true
break
}
}
if !foundCondition {
t.Errorf("expected to find condition type %q, status %q and reason %q", tc.expectedType, tc.expectedStatus, tc.expectedReason)
}
})
}
}
func TestReconcileCloudEvents(t *testing.T) {
taskRunWithNoCEResources := tb.TaskRun("test-taskrun-no-ce-resources",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name, tb.TaskRefAPIVersion("a1")),
))
taskRunWithTwoCEResourcesNoInit := tb.TaskRun("test-taskrun-two-ce-resources-no-init",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(twoOutputsTask.Name),
tb.TaskRunOutputs(
tb.TaskRunOutputsResource(cloudEventResource.Name, tb.TaskResourceBindingRef(cloudEventResource.Name)),
tb.TaskRunOutputsResource(anotherCloudEventResource.Name, tb.TaskResourceBindingRef(anotherCloudEventResource.Name)),
),
),
)
taskRunWithTwoCEResourcesInit := tb.TaskRun("test-taskrun-two-ce-resources-init",
tb.TaskRunNamespace("foo"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(twoOutputsTask.Name),
tb.TaskRunOutputs(
tb.TaskRunOutputsResource(cloudEventResource.Name, tb.TaskResourceBindingRef(cloudEventResource.Name)),
tb.TaskRunOutputsResource(anotherCloudEventResource.Name, tb.TaskResourceBindingRef(anotherCloudEventResource.Name)),
),
),
tb.TaskRunStatus(
tb.TaskRunCloudEvent(cloudEventTarget1, "", 0, v1alpha1.CloudEventConditionUnknown),
tb.TaskRunCloudEvent(cloudEventTarget2, "", 0, v1alpha1.CloudEventConditionUnknown),
),
)
	taskRunWithCESucceeded := tb.TaskRun("test-taskrun-ce-succeeded",
tb.TaskRunNamespace("foo"),
tb.TaskRunSelfLink("/task/1234"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(twoOutputsTask.Name),
tb.TaskRunOutputs(
tb.TaskRunOutputsResource(cloudEventResource.Name, tb.TaskResourceBindingRef(cloudEventResource.Name)),
tb.TaskRunOutputsResource(anotherCloudEventResource.Name, tb.TaskResourceBindingRef(anotherCloudEventResource.Name)),
),
),
tb.TaskRunStatus(
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
tb.TaskRunCloudEvent(cloudEventTarget1, "", 0, v1alpha1.CloudEventConditionUnknown),
tb.TaskRunCloudEvent(cloudEventTarget2, "", 0, v1alpha1.CloudEventConditionUnknown),
),
)
taskRunWithCEFailed := tb.TaskRun("test-taskrun-ce-failed",
tb.TaskRunNamespace("foo"),
tb.TaskRunSelfLink("/task/1234"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(twoOutputsTask.Name),
tb.TaskRunOutputs(
tb.TaskRunOutputsResource(cloudEventResource.Name, tb.TaskResourceBindingRef(cloudEventResource.Name)),
tb.TaskRunOutputsResource(anotherCloudEventResource.Name, tb.TaskResourceBindingRef(anotherCloudEventResource.Name)),
),
),
tb.TaskRunStatus(
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
}),
tb.TaskRunCloudEvent(cloudEventTarget1, "", 0, v1alpha1.CloudEventConditionUnknown),
tb.TaskRunCloudEvent(cloudEventTarget2, "", 0, v1alpha1.CloudEventConditionUnknown),
),
)
	taskRunWithCESucceededOneAttempt := tb.TaskRun("test-taskrun-ce-succeeded-one-attempt",
tb.TaskRunNamespace("foo"),
tb.TaskRunSelfLink("/task/1234"),
tb.TaskRunSpec(
tb.TaskRunTaskRef(twoOutputsTask.Name),
tb.TaskRunOutputs(
tb.TaskRunOutputsResource(cloudEventResource.Name, tb.TaskResourceBindingRef(cloudEventResource.Name)),
tb.TaskRunOutputsResource(anotherCloudEventResource.Name, tb.TaskResourceBindingRef(anotherCloudEventResource.Name)),
),
),
tb.TaskRunStatus(
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
tb.TaskRunCloudEvent(cloudEventTarget1, "", 1, v1alpha1.CloudEventConditionUnknown),
tb.TaskRunCloudEvent(cloudEventTarget2, "fakemessage", 0, v1alpha1.CloudEventConditionUnknown),
),
)
taskruns := []*v1alpha1.TaskRun{
taskRunWithNoCEResources, taskRunWithTwoCEResourcesNoInit,
		taskRunWithTwoCEResourcesInit, taskRunWithCESucceeded, taskRunWithCEFailed,
		taskRunWithCESucceededOneAttempt,
}
d := test.Data{
TaskRuns: taskruns,
Tasks: []*v1alpha1.Task{simpleTask, twoOutputsTask},
ClusterTasks: []*v1alpha1.ClusterTask{},
PipelineResources: []*v1alpha1.PipelineResource{cloudEventResource, anotherCloudEventResource},
}
for _, tc := range []struct {
name string
taskRun *v1alpha1.TaskRun
wantCloudEvents []v1alpha1.CloudEventDelivery
}{{
name: "no-ce-resources",
taskRun: taskRunWithNoCEResources,
wantCloudEvents: taskRunWithNoCEResources.Status.CloudEvents,
}, {
name: "ce-resources-no-init",
taskRun: taskRunWithTwoCEResourcesNoInit,
wantCloudEvents: tb.TaskRun("want", tb.TaskRunStatus(
tb.TaskRunCloudEvent(cloudEventTarget1, "", 0, v1alpha1.CloudEventConditionUnknown),
tb.TaskRunCloudEvent(cloudEventTarget2, "", 0, v1alpha1.CloudEventConditionUnknown),
)).Status.CloudEvents,
}, {
name: "ce-resources-init",
taskRun: taskRunWithTwoCEResourcesInit,
wantCloudEvents: tb.TaskRun("want2", tb.TaskRunStatus(
tb.TaskRunCloudEvent(cloudEventTarget1, "", 0, v1alpha1.CloudEventConditionUnknown),
tb.TaskRunCloudEvent(cloudEventTarget2, "", 0, v1alpha1.CloudEventConditionUnknown),
)).Status.CloudEvents,
}, {
name: "ce-resources-init-task-successful",
		taskRun: taskRunWithCESucceeded,
wantCloudEvents: tb.TaskRun("want3", tb.TaskRunStatus(
tb.TaskRunCloudEvent(cloudEventTarget1, "", 1, v1alpha1.CloudEventConditionSent),
tb.TaskRunCloudEvent(cloudEventTarget2, "", 1, v1alpha1.CloudEventConditionSent),
)).Status.CloudEvents,
}, {
name: "ce-resources-init-task-failed",
taskRun: taskRunWithCEFailed,
wantCloudEvents: tb.TaskRun("want4", tb.TaskRunStatus(
tb.TaskRunCloudEvent(cloudEventTarget1, "", 1, v1alpha1.CloudEventConditionSent),
tb.TaskRunCloudEvent(cloudEventTarget2, "", 1, v1alpha1.CloudEventConditionSent),
)).Status.CloudEvents,
}, {
name: "ce-resources-init-task-successful-one-attempt",
		taskRun: taskRunWithCESucceededOneAttempt,
wantCloudEvents: tb.TaskRun("want5", tb.TaskRunStatus(
tb.TaskRunCloudEvent(cloudEventTarget1, "", 1, v1alpha1.CloudEventConditionUnknown),
tb.TaskRunCloudEvent(cloudEventTarget2, "fakemessage", 1, v1alpha1.CloudEventConditionSent),
)).Status.CloudEvents,
}} {
t.Run(tc.name, func(t *testing.T) {
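			// Seed the name generator so generated names are deterministic.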
names.TestingSeed()
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c := testAssets.Controller
clients := testAssets.Clients
saName := tc.taskRun.Spec.ServiceAccountName
if saName == "" {
saName = "default"
}
if _, err := clients.Kube.CoreV1().ServiceAccounts(tc.taskRun.Namespace).Create(&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: saName,
Namespace: tc.taskRun.Namespace,
},
}); err != nil {
t.Fatal(err)
}
if err := c.Reconciler.Reconcile(context.Background(), getRunName(tc.taskRun)); err != nil {
t.Errorf("expected no error. Got error %v", err)
}
namespace, name, err := cache.SplitMetaNamespaceKey(tc.taskRun.Name)
if err != nil {
t.Errorf("Invalid resource key: %v", err)
}
tr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(namespace).Get(name, metav1.GetOptions{})
if err != nil {
t.Fatalf("getting updated taskrun: %v", err)
}
opts := cloudevent.GetCloudEventDeliveryCompareOptions()
t.Log(tr.Status.CloudEvents)
if diff := cmp.Diff(tc.wantCloudEvents, tr.Status.CloudEvents, opts...); diff != "" {
t.Errorf("Unexpected status of cloud events (-want +got) = %s", diff)
}
})
}
}
func TestUpdateTaskRunResourceResult(t *testing.T) {
for _, c := range []struct {
desc string
podStatus corev1.PodStatus
taskRunStatus *v1alpha1.TaskRunStatus
want []v1alpha1.PipelineResourceResult
}{{
desc: "image resource updated",
podStatus: corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}}]`,
},
},
}},
},
want: []v1alpha1.PipelineResourceResult{{
Key: "digest",
Value: "sha256:1234",
ResourceRef: v1alpha1.PipelineResourceRef{Name: "source-image"},
}},
}} {
t.Run(c.desc, func(t *testing.T) {
names.TestingSeed()
tr := &v1alpha1.TaskRun{}
tr.Status.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
})
if err := updateTaskRunResourceResult(tr, c.podStatus); err != nil {
t.Errorf("updateTaskRunResourceResult: %s", err)
}
if d := cmp.Diff(c.want, tr.Status.ResourcesResult); d != "" {
t.Errorf("updateTaskRunResourceResult (-want, +got): %s", d)
}
})
}
}
func TestUpdateTaskRunResult(t *testing.T) {
for _, c := range []struct {
desc string
podStatus corev1.PodStatus
taskRunStatus *v1alpha1.TaskRunStatus
wantResults []v1alpha1.TaskRunResult
want []v1alpha1.PipelineResourceResult
}{{
desc: "test result with pipeline result",
podStatus: corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}, "type": "PipelineResourceResult"}]`,
},
},
}},
},
wantResults: []v1alpha1.TaskRunResult{{
Name: "resultName",
Value: "resultValue",
}},
want: []v1alpha1.PipelineResourceResult{{
Key: "digest",
Value: "sha256:1234",
ResourceRef: v1alpha1.PipelineResourceRef{Name: "source-image"},
ResultType: "PipelineResourceResult",
}},
}} {
t.Run(c.desc, func(t *testing.T) {
names.TestingSeed()
tr := &v1alpha1.TaskRun{}
tr.Status.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
})
if err := updateTaskRunResourceResult(tr, c.podStatus); err != nil {
t.Errorf("updateTaskRunResourceResult: %s", err)
}
if d := cmp.Diff(c.wantResults, tr.Status.TaskRunResults); d != "" {
t.Errorf("updateTaskRunResourceResult TaskRunResults (-want, +got): %s", d)
}
if d := cmp.Diff(c.want, tr.Status.ResourcesResult); d != "" {
t.Errorf("updateTaskRunResourceResult ResourcesResult (-want, +got): %s", d)
}
})
}
}
func TestUpdateTaskRunResult2(t *testing.T) {
for _, c := range []struct {
desc string
podStatus corev1.PodStatus
taskRunStatus *v1alpha1.TaskRunStatus
wantResults []v1alpha1.TaskRunResult
want []v1alpha1.PipelineResourceResult
}{{
desc: "test result with pipeline result - no result type",
podStatus: corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}}]`,
},
},
}},
},
wantResults: []v1alpha1.TaskRunResult{{
Name: "resultName",
Value: "resultValue",
}},
want: []v1alpha1.PipelineResourceResult{{
Key: "digest",
Value: "sha256:1234",
ResourceRef: v1alpha1.PipelineResourceRef{Name: "source-image"},
}},
}} {
t.Run(c.desc, func(t *testing.T) {
names.TestingSeed()
tr := &v1alpha1.TaskRun{}
tr.Status.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
})
if err := updateTaskRunResourceResult(tr, c.podStatus); err != nil {
t.Errorf("updateTaskRunResourceResult: %s", err)
}
			if d := cmp.Diff(c.wantResults, tr.Status.TaskRunResults); d != "" {
				t.Errorf("updateTaskRunResourceResult TaskRunResults (-want, +got): %s", d)
			}
			if d := cmp.Diff(c.want, tr.Status.ResourcesResult); d != "" {
				t.Errorf("updateTaskRunResourceResult ResourcesResult (-want, +got): %s", d)
}
})
}
}
func TestUpdateTaskRunResultTwoResults(t *testing.T) |
func TestUpdateTaskRunResultWhenTaskFailed(t *testing.T) {
for _, c := range []struct {
desc string
podStatus corev1.PodStatus
taskRunStatus *v1alpha1.TaskRunStatus
wantResults []v1alpha1.TaskRunResult
want []v1alpha1.PipelineResourceResult
}{{
desc: "update task results when task fails",
podStatus: corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultName","value":"resultValue", "type": "TaskRunResult"}, {"name":"source-image","digest":"sha256:1234"}]`,
},
},
}},
},
taskRunStatus: &v1alpha1.TaskRunStatus{
Status: duckv1beta1.Status{Conditions: []apis.Condition{{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
}}},
},
wantResults: nil,
want: nil,
}} {
t.Run(c.desc, func(t *testing.T) {
names.TestingSeed()
if d := cmp.Diff(c.want, c.taskRunStatus.ResourcesResult); d != "" {
t.Errorf("updateTaskRunResourceResult resources (-want, +got): %s", d)
}
if d := cmp.Diff(c.wantResults, c.taskRunStatus.TaskRunResults); d != "" {
t.Errorf("updateTaskRunResourceResult results (-want, +got): %s", d)
}
})
}
}
func TestUpdateTaskRunResourceResult_Errors(t *testing.T) {
for _, c := range []struct {
desc string
podStatus corev1.PodStatus
taskRunStatus *v1alpha1.TaskRunStatus
want []v1alpha1.PipelineResourceResult
}{{
desc: "image resource exporter with malformed json output",
podStatus: corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `MALFORMED JSON{"digest":"sha256:1234"}`,
},
},
}},
},
taskRunStatus: &v1alpha1.TaskRunStatus{
Status: duckv1beta1.Status{Conditions: []apis.Condition{{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}}},
},
want: nil,
}} {
t.Run(c.desc, func(t *testing.T) {
names.TestingSeed()
if err := updateTaskRunResourceResult(&v1alpha1.TaskRun{Status: *c.taskRunStatus}, c.podStatus); err == nil {
t.Error("Expected error, got nil")
}
if d := cmp.Diff(c.want, c.taskRunStatus.ResourcesResult); d != "" {
t.Errorf("updateTaskRunResourceResult (-want, +got): %s", d)
}
})
}
}
func TestReconcile_Single_SidecarState(t *testing.T) {
runningState := corev1.ContainerStateRunning{StartedAt: metav1.Time{Time: time.Now()}}
taskRun := tb.TaskRun("test-taskrun-sidecars",
tb.TaskRunSpec(
tb.TaskRunTaskRef(taskSidecar.Name),
),
tb.TaskRunStatus(
tb.SidecarState(
tb.SidecarStateName("sidecar"),
tb.SidecarStateImageID("image-id"),
tb.SidecarStateContainerName("sidecar-sidecar"),
tb.SetSidecarStateRunning(runningState),
),
),
)
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{taskSidecar},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
clients := testAssets.Clients
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
getTaskRun, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
}
expected := v1alpha1.SidecarState{
Name: "sidecar",
ImageID: "image-id",
ContainerName: "sidecar-sidecar",
ContainerState: corev1.ContainerState{
Running: &runningState,
},
}
if c := cmp.Diff(expected, getTaskRun.Status.Sidecars[0]); c != "" {
t.Errorf("TestReconcile_Single_SidecarState (-want, +got): %s", c)
}
}
func TestReconcile_Multiple_SidecarStates(t *testing.T) {
runningState := corev1.ContainerStateRunning{StartedAt: metav1.Time{Time: time.Now()}}
waitingState := corev1.ContainerStateWaiting{Reason: "PodInitializing"}
taskRun := tb.TaskRun("test-taskrun-sidecars",
tb.TaskRunSpec(
tb.TaskRunTaskRef(taskMultipleSidecars.Name),
),
tb.TaskRunStatus(
tb.SidecarState(
tb.SidecarStateName("sidecar1"),
tb.SidecarStateImageID("image-id"),
tb.SidecarStateContainerName("sidecar-sidecar1"),
tb.SetSidecarStateRunning(runningState),
),
),
tb.TaskRunStatus(
tb.SidecarState(
tb.SidecarStateName("sidecar2"),
tb.SidecarStateImageID("image-id"),
tb.SidecarStateContainerName("sidecar-sidecar2"),
tb.SetSidecarStateWaiting(waitingState),
),
),
)
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{taskRun},
Tasks: []*v1alpha1.Task{taskMultipleSidecars},
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
clients := testAssets.Clients
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
getTaskRun, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected completed TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
}
expected := []v1alpha1.SidecarState{
{
Name: "sidecar1",
ImageID: "image-id",
ContainerName: "sidecar-sidecar1",
ContainerState: corev1.ContainerState{
Running: &runningState,
},
},
{
Name: "sidecar2",
ImageID: "image-id",
ContainerName: "sidecar-sidecar2",
ContainerState: corev1.ContainerState{
Waiting: &waitingState,
},
},
}
for i, sc := range getTaskRun.Status.Sidecars {
if c := cmp.Diff(expected[i], sc); c != "" {
t.Errorf("TestReconcile_Multiple_SidecarStates sidecar%d (-want, +got): %s", i+1, c)
}
}
}
// TestReconcileWorkspaceMissing tests a reconcile of a TaskRun that does
// not include a Workspace that the Task is expecting.
func TestReconcileWorkspaceMissing(t *testing.T) {
taskWithWorkspace := tb.Task("test-task-with-workspace",
tb.TaskSpec(
tb.TaskWorkspace("ws1", "a test task workspace", "", true),
), tb.TaskNamespace("foo"))
taskRun := tb.TaskRun("test-taskrun-missing-workspace", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(taskWithWorkspace.Name, tb.TaskRefAPIVersion("a1")),
))
d := test.Data{
Tasks: []*v1alpha1.Task{taskWithWorkspace},
TaskRuns: []*v1alpha1.TaskRun{taskRun},
ClusterTasks: nil,
PipelineResources: nil,
}
names.TestingSeed()
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
clients := testAssets.Clients
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
tr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
}
failedCorrectly := false
for _, c := range tr.Status.Conditions {
if c.Type == apis.ConditionSucceeded && c.Status == corev1.ConditionFalse && c.Reason == podconvert.ReasonFailedValidation {
failedCorrectly = true
}
}
if !failedCorrectly {
t.Errorf("Expected TaskRun to fail validation but it did not. Final conditions were:\n%#v", tr.Status.Conditions)
}
}
func TestReconcileTaskResourceResolutionAndValidation(t *testing.T) {
for _, tt := range []struct {
desc string
d test.Data
wantFailedReason string
wantEvents []string
}{{
desc: "Fail ResolveTaskResources",
d: test.Data{
Tasks: []*v1alpha1.Task{
tb.Task("test-task-missing-resource",
tb.TaskSpec(
tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit)),
), tb.TaskNamespace("foo")),
},
TaskRuns: []*v1alpha1.TaskRun{
tb.TaskRun("test-taskrun-missing-resource", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef("test-task-missing-resource", tb.TaskRefAPIVersion("a1")),
tb.TaskRunInputs(
tb.TaskRunInputsResource("workspace", tb.TaskResourceBindingRef("git")),
),
)),
},
ClusterTasks: nil,
PipelineResources: nil,
},
wantFailedReason: podconvert.ReasonFailedResolution,
wantEvents: []string{
"Normal Started ",
"Warning Failed",
},
}, {
desc: "Fail ValidateResolvedTaskResources",
d: test.Data{
Tasks: []*v1alpha1.Task{
tb.Task("test-task-missing-resource",
tb.TaskSpec(
tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit)),
), tb.TaskNamespace("foo")),
},
TaskRuns: []*v1alpha1.TaskRun{
tb.TaskRun("test-taskrun-missing-resource", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef("test-task-missing-resource", tb.TaskRefAPIVersion("a1")),
)),
},
ClusterTasks: nil,
PipelineResources: nil,
},
wantFailedReason: podconvert.ReasonFailedValidation,
wantEvents: []string{
"Normal Started ",
"Warning Failed",
},
}} {
t.Run(tt.desc, func(t *testing.T) {
names.TestingSeed()
testAssets, cancel := getTaskRunController(t, tt.d)
defer cancel()
clients := testAssets.Clients
reconciler := testAssets.Controller.Reconciler.(*Reconciler)
fr := reconciler.Recorder.(*record.FakeRecorder)
if err := reconciler.Reconcile(context.Background(), getRunName(tt.d.TaskRuns[0])); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
tr, err := clients.Pipeline.TektonV1alpha1().TaskRuns(tt.d.TaskRuns[0].Namespace).Get(tt.d.TaskRuns[0].Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Expected TaskRun %s to exist but instead got error when getting it: %v", tt.d.TaskRuns[0].Name, err)
}
for _, c := range tr.Status.Conditions {
if c.Type != apis.ConditionSucceeded || c.Status != corev1.ConditionFalse || c.Reason != tt.wantFailedReason {
t.Errorf("Expected TaskRun to \"%s\" but it did not. Final conditions were:\n%#v", tt.wantFailedReason, tr.Status.Conditions)
}
}
err = checkEvents(fr, tt.desc, tt.wantEvents)
			if err != nil {
				t.Error(err)
			}
})
}
}
// TestReconcileWorkspaceWithVolumeClaimTemplate tests a reconcile of a TaskRun that has
// a Workspace with a VolumeClaimTemplate and checks that it is translated into a created PersistentVolumeClaim.
func TestReconcileWorkspaceWithVolumeClaimTemplate(t *testing.T) {
workspaceName := "ws1"
claimName := "mypvc"
taskWithWorkspace := tb.Task("test-task-with-workspace", tb.TaskNamespace("foo"),
tb.TaskSpec(
tb.TaskWorkspace(workspaceName, "a test task workspace", "", true),
))
taskRun := tb.TaskRun("test-taskrun-missing-workspace", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(taskWithWorkspace.Name, tb.TaskRefAPIVersion("a1")),
tb.TaskRunWorkspaceVolumeClaimTemplate(workspaceName, "", &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: claimName,
},
Spec: corev1.PersistentVolumeClaimSpec{},
}),
))
d := test.Data{
Tasks: []*v1alpha1.Task{taskWithWorkspace},
TaskRuns: []*v1alpha1.TaskRun{taskRun},
ClusterTasks: nil,
PipelineResources: nil,
}
names.TestingSeed()
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
clients := testAssets.Clients
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(taskRun)); err != nil {
t.Errorf("expected no error reconciling valid TaskRun but got %v", err)
}
ttt, err := clients.Pipeline.TektonV1alpha1().TaskRuns(taskRun.Namespace).Get(taskRun.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("expected TaskRun %s to exist but instead got error when getting it: %v", taskRun.Name, err)
}
for _, w := range ttt.Spec.Workspaces {
if w.PersistentVolumeClaim != nil {
t.Fatalf("expected workspace from volumeClaimTemplate to be translated to PVC")
}
}
expectedPVCName := fmt.Sprintf("%s-%s-%s", claimName, workspaceName, taskRun.Name)
_, err = clients.Kube.CoreV1().PersistentVolumeClaims(taskRun.Namespace).Get(expectedPVCName, metav1.GetOptions{})
if err != nil {
t.Fatalf("expected PVC %s to exist but instead got error when getting it: %v", expectedPVCName, err)
}
}
func TestFailTaskRun(t *testing.T) {
testCases := []struct {
name string
taskRun *v1alpha1.TaskRun
pod *corev1.Pod
reason string
message string
expectedStatus apis.Condition
}{{
name: "no-pod-scheduled",
taskRun: tb.TaskRun("test-taskrun-run-failed", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
tb.TaskRunCancelled,
), tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
}))),
reason: "some reason",
message: "some message",
expectedStatus: apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "some reason",
Message: "some message",
},
}, {
name: "pod-scheduled",
taskRun: tb.TaskRun("test-taskrun-run-failed", tb.TaskRunNamespace("foo"), tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
tb.TaskRunCancelled,
), tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
}), tb.PodName("foo-is-bar"))),
pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "foo-is-bar",
}},
reason: "some reason",
message: "some message",
expectedStatus: apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "some reason",
Message: "some message",
},
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{tc.taskRun},
}
if tc.pod != nil {
d.Pods = []*corev1.Pod{tc.pod}
}
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c, ok := testAssets.Controller.Reconciler.(*Reconciler)
if !ok {
t.Errorf("failed to construct instance of taskrun reconciler")
return
}
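			// failTaskRun should flip the Succeeded condition to False with the given reason and message (and clean up the pod when one exists).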
err := c.failTaskRun(tc.taskRun, tc.reason, tc.message)
if err != nil {
t.Fatal(err)
}
			if d := cmp.Diff(&tc.expectedStatus, tc.taskRun.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" {
t.Fatalf("-want, +got: %v", d)
}
})
}
}
func Test_storeTaskSpec(t *testing.T) {
ctx := context.Background()
tr := tb.TaskRun("foo", tb.TaskRunSpec(tb.TaskRunTaskRef("foo-task")))
ts := tb.Task("some-task", tb.TaskSpec(tb.TaskDescription("foo-task"))).Spec
want := &v1beta1.TaskSpec{}
if err := ts.ConvertTo(ctx, want); err != nil {
t.Errorf("error converting to v1beta1: %v", err)
}
// The first time we set it, it should get copied.
if err := storeTaskSpec(ctx, tr, &ts); err != nil {
t.Errorf("storeTaskSpec() error = %v", err)
}
	if d := cmp.Diff(want, tr.Status.TaskSpec); d != "" {
t.Fatalf("-want, +got: %v", d)
}
ts.Description = "new-task"
// The next time, it should not get overwritten
if err := storeTaskSpec(ctx, tr, &ts); err != nil {
t.Errorf("storeTaskSpec() error = %v", err)
}
	if d := cmp.Diff(want, tr.Status.TaskSpec); d != "" {
t.Fatalf("-want, +got: %v", d)
}
}
| {
for _, c := range []struct {
desc string
podStatus corev1.PodStatus
taskRunStatus *v1alpha1.TaskRunStatus
want []v1alpha1.TaskRunResult
}{{
desc: "two test results",
podStatus: corev1.PodStatus{
ContainerStatuses: []corev1.ContainerStatus{{
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultNameOne","value":"resultValueOne", "type": "TaskRunResult"},{"key":"resultNameTwo","value":"resultValueTwo", "type": "TaskRunResult"}]`,
},
},
}},
},
want: []v1alpha1.TaskRunResult{{
Name: "resultNameOne",
Value: "resultValueOne",
}, {
Name: "resultNameTwo",
Value: "resultValueTwo",
}},
}} {
t.Run(c.desc, func(t *testing.T) {
names.TestingSeed()
tr := &v1alpha1.TaskRun{}
tr.Status.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
})
if err := updateTaskRunResourceResult(tr, c.podStatus); err != nil {
t.Errorf("updateTaskRunResourceResult: %s", err)
}
if d := cmp.Diff(c.want, tr.Status.TaskRunResults); d != "" {
t.Errorf("updateTaskRunResourceResult (-want, +got): %s", d)
}
})
}
} |
Loading.tsx | /*
* Copyright 2022 Fernando Boucquez
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from 'react';
import { Spinner } from 'react-bootstrap';
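// Renders a small inline Bootstrap spinner, suitable as a lightweight "content loading" placeholder.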
export default function | () {
return <Spinner as="span" animation="border" size="sm" role="status" aria-hidden="true" />;
}
| Loading |
libpng.py | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing libpng installation and cleanup functions."""
def | (vm):
"""Installs the libpng package on the VM."""
vm.InstallPackages('libpng')
vm.InstallPackages('libpng-devel')
def AptInstall(vm):
"""Installs the libpng package on the VM."""
vm.InstallPackages('libpng3 libpng12-dev')
| YumInstall |
rebuild.go | package action
import (
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/Masterminds/glide/cfg"
"github.com/Masterminds/glide/msg"
gpath "github.com/Masterminds/glide/path"
)
// Rebuild rebuilds '.a' files for a project.
//
// Prior to Go 1.4, this could substantially reduce time on incremental compiles.
// It remains to be seen whether this is tremendously beneficial to modern Go
// programs.
func Rebuild() {
msg.Warn("The rebuild command is deprecated and will be removed in a future version")
msg.Warn("Use the go install command instead")
conf := EnsureConfig()
vpath, err := gpath.Vendor()
if err != nil {
msg.Die("Could not get vendor path: %s", err)
}
msg.Info("Building dependencies.\n")
if len(conf.Imports) == 0 {
msg.Info("No dependencies found. Nothing built.\n")
return
}
for _, dep := range conf.Imports {
if err := buildDep(dep, vpath); err != nil {
msg.Warn("Failed to build %s: %s\n", dep.Name, err)
}
}
}
func buildDep(dep *cfg.Dependency, vpath string) error {
	if len(dep.Subpackages) == 0 {
		if err := buildPath(dep.Name); err != nil {
			return err
		}
	}
for _, pkg := range dep.Subpackages {
if pkg == "**" || pkg == "..." {
//Info("Building all packages in %s\n", dep.Name)
buildPath(path.Join(dep.Name, "..."))
} else {
paths, err := resolvePackages(vpath, dep.Name, pkg)
if err != nil {
msg.Warn("Error resolving packages: %s", err)
}
buildPaths(paths)
}
}
return nil
}
func resolvePackages(vpath, pkg, subpkg string) ([]string, error) {
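	// Glob the subpackage pattern under the vendor path, then trim the vendor prefix so the results are plain import paths.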
sdir, _ := os.Getwd()
if err := os.Chdir(filepath.Join(vpath, pkg, subpkg)); err != nil {
return []string{}, err
}
defer os.Chdir(sdir)
p, err := filepath.Glob(path.Join(vpath, pkg, subpkg))
if err != nil {
return []string{}, err
}
for k, v := range p {
nv := strings.TrimPrefix(v, vpath)
p[k] = strings.TrimPrefix(nv, string(filepath.Separator))
}
return p, nil
}
func buildPaths(paths []string) error |
func buildPath(path string) error {
msg.Info("Running go build %s\n", path)
// . in a filepath.Join is removed so it needs to be prepended separately.
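	// (filepath.Join cleans its result, so filepath.Join(".", "vendor", path) == filepath.Join("vendor", path).)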
p := "." + string(filepath.Separator) + filepath.Join("vendor", path)
out, err := exec.Command(goExecutable(), "install", p).CombinedOutput()
if err != nil {
msg.Warn("Failed to run 'go install' for %s: %s", path, string(out))
}
return err
}
| {
for _, path := range paths {
if err := buildPath(path); err != nil {
return err
}
}
return nil
} |
test_0224-arrow-to-awkward.py | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
pyarrow = pytest.importorskip("pyarrow")
pytest.importorskip("awkward._v2._connect.pyarrow")
to_list = ak._v2.operations.to_list
def test_toarrow_BitMaskedArray():
content = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
bitmask = ak._v2.index.IndexU8(np.array([40, 34], dtype=np.uint8))
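    # Positional args below: mask, content, valid_when=False, length=9, lsb_order=False.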
array = ak._v2.contents.BitMaskedArray(bitmask, content, False, 9, False)
assert array.to_arrow().to_pylist() == to_list(array)
def test_toarrow_ByteMaskedArray_1():
content = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
bytemask = ak._v2.index.Index8(np.array([False, True, False], dtype=np.bool_))
array = ak._v2.contents.ByteMaskedArray(bytemask, content, True)
assert array.to_arrow().to_pylist() == to_list(array)
def test_toarrow_NumpyArray_1():
array = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))
assert isinstance(array.to_arrow(), pyarrow.lib.Array)
assert array.to_arrow().to_pylist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]
def test_toarrow_NumpyArray_2():
array = ak._v2.contents.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]]))
assert isinstance(array.to_arrow(), pyarrow.lib.Array)
assert array.to_arrow().to_pylist() == [[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]]
def test_toarrow_EmptyArray():
array = ak._v2.contents.EmptyArray()
assert isinstance(array.to_arrow(), pyarrow.lib.Array)
assert array.to_arrow().to_pylist() == []
def test_toarrow_ListOffsetArray64():
content = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 9]))
array = ak._v2.contents.ListOffsetArray(offsets, content)
assert isinstance(array.to_arrow().storage, pyarrow.LargeListArray)
assert array.to_arrow().to_pylist() == [
[1.1, 2.2, 3.3],
[],
[4.4, 5.5],
[6.6],
[7.7, 8.8, 9.9],
]
assert array[1:].to_arrow().to_pylist() == [
[],
[4.4, 5.5],
[6.6],
[7.7, 8.8, 9.9],
]
assert array[2:].to_arrow().to_pylist() == [
[4.4, 5.5],
[6.6],
[7.7, 8.8, 9.9],
]
def test_toarrow_ListOffsetArrayU32():
content = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.IndexU32(np.array([0, 3, 3, 5, 6, 9]))
array = ak._v2.contents.ListOffsetArray(offsets, content)
assert isinstance(array.to_arrow().storage, pyarrow.ListArray)
assert array.to_arrow().to_pylist() == [
[1.1, 2.2, 3.3],
[],
[4.4, 5.5],
[6.6],
[7.7, 8.8, 9.9],
]
assert array[1:].to_arrow().to_pylist() == [
[],
[4.4, 5.5],
[6.6],
[7.7, 8.8, 9.9],
]
assert array[2:].to_arrow().to_pylist() == [
[4.4, 5.5],
[6.6],
[7.7, 8.8, 9.9],
]
def test_toarrow_ListArray_RegularArray():
content = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
array = ak._v2.contents.ListOffsetArray(offsets, content)
assert array.to_arrow().to_pylist() == [
["one", "two", "three"],
[],
["four", "five"],
["six"],
["seven", "eight", "nine"],
]
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)
starts = ak._v2.index.Index64(np.array([0, 1], dtype=np.int64))
stops = ak._v2.index.Index64(np.array([2, 3], dtype=np.int64))
listarray = ak._v2.contents.ListArray(starts, stops, regulararray)
assert isinstance(listarray.to_arrow().storage, pyarrow.LargeListArray)
assert listarray.to_arrow().to_pylist() == [
[[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]]],
[[[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]],
]
assert listarray[1:].to_arrow().to_pylist() == [
[[[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]],
]
assert isinstance(regulararray.to_arrow().storage, pyarrow.FixedSizeListArray)
assert regulararray.to_arrow().to_pylist() == [
[[0.0, 1.1, 2.2], []],
[[3.3, 4.4], [5.5]],
[[6.6, 7.7, 8.8, 9.9], []],
]
assert regulararray[1:].to_arrow().to_pylist() == [
[[3.3, 4.4], [5.5]],
[[6.6, 7.7, 8.8, 9.9], []],
]
def test_toarrow_RecordArray():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "two", "2", "wonky"],
)
assert isinstance(recordarray.to_arrow().storage, pyarrow.StructArray)
assert recordarray.to_arrow().to_pylist() == [
{"one": 1, "two": [0.0, 1.1, 2.2], "2": 1.1, "wonky": 1},
{"one": 2, "two": [], "2": 2.2, "wonky": 2},
{"one": 3, "two": [3.3, 4.4], "2": 3.3, "wonky": 3},
{"one": 4, "two": [5.5], "2": 4.4, "wonky": 4},
{"one": 5, "two": [6.6, 7.7, 8.8, 9.9], "2": 5.5, "wonky": 5},
]
def test_toarrow_UnionArray():
content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))
index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))
unionarray = ak._v2.contents.UnionArray(tags, index, [content0, content1])
assert isinstance(unionarray.to_arrow().storage, pyarrow.UnionArray)
assert unionarray.to_arrow().to_pylist() == [
1,
2,
[1.1, 2.2, 3.3],
[],
3,
[4.4, 5.5],
5,
4,
]
def test_toarrow_IndexedArray():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
    index = ak._v2.index.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int32))
indexedarray = ak._v2.contents.IndexedArray(index, content)
assert isinstance(indexedarray.to_arrow().storage, pyarrow.lib.DoubleArray)
assert indexedarray.to_arrow().to_pylist() == [
0.0,
2.2,
4.4,
6.6,
8.8,
9.9,
7.7,
5.5,
]
def test_toarrow_IndexedOptionArray_2():
array = ak._v2.highlevel.Array([1.1, 2.2, 3.3, 4.4, 5.5, None]).layout
assert array.to_arrow().to_pylist() == [1.1, 2.2, 3.3, 4.4, 5.5, None]
assert array[:-1].to_arrow().to_pylist() == [1.1, 2.2, 3.3, 4.4, 5.5]
assert array[:1].to_arrow().to_pylist() == [1.1]
assert array[:0].to_arrow().to_pylist() == []
content = ak._v2.contents.NumpyArray(np.array([], dtype=np.float64))
index = ak._v2.index.Index32(np.array([-1, -1, -1, -1], dtype=np.int32))
indexedoptionarray = ak._v2.contents.IndexedOptionArray(index, content)
assert indexedoptionarray.to_arrow().to_pylist() == [None, None, None, None]
def test_toarrow_ByteMaskedArray_2():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
bytemaskedarray = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([True, True, False, False, False], dtype=np.int8)),
listoffsetarray,
True,
)
assert bytemaskedarray.to_arrow().to_pylist() == [
[0.0, 1.1, 2.2],
[],
None,
None,
None,
]
def test_toarrow_ByteMaskedArray_3():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)
starts = ak._v2.index.Index64(np.array([0, 1]))
stops = ak._v2.index.Index64(np.array([2, 3]))
listarray = ak._v2.contents.ListArray(starts, stops, regulararray)
bytemaskedarray = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([True, False], dtype=np.int8)), listarray, True
)
assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)
def test_toarrow_ByteMaskedArray_4():
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "two", "2", "wonky"],
)
bytemaskedarray = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([True, False], dtype=np.int8)), recordarray, True
)
assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)
def test_toarrow_ByteMaskedArray_5():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
    index = ak._v2.index.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int32))
indexedarray = ak._v2.contents.IndexedArray(index, content)
bytemaskedarray = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([True, False, False], dtype=np.int8)),
indexedarray,
True,
)
assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)
def test_toarrow_ByteMaskedArray_broken_unions_1():
content0 = ak._v2.highlevel.Array(
[[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]]
).layout
content1 = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1, 0, 0], dtype=np.int8))
index = ak._v2.index.Index32(
np.array([0, 1, 1, 0, 2, 2, 4, 3, 3, 4], dtype=np.int32)
)
unionarray = ak._v2.contents.UnionArray(tags, index, [content0, content1])
bytemaskedarray = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(
# tags 1, 1, 0, 0, 1, 0, 1, 1, 0, 0
# index 0, 1, 1, 0, 2, 2, 4, 3, 3, 4
np.array(
[True, False, False, True, False, True, True, False, False, True],
dtype=np.int8,
)
),
unionarray,
valid_when=True,
)
assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)
def test_toarrow_ByteMaskedArray_broken_unions_2():
content0 = ak._v2.highlevel.Array(
[[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]]
).layout
content1 = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))
tags = ak._v2.index.Index8(
np.array([1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0], dtype=np.int8)
)
index = ak._v2.index.Index32(
np.array([0, 1, 1, 0, 2, 2, 4, 3, 3, 4, 3], dtype=np.int32)
)
unionarray = ak._v2.contents.UnionArray(tags, index, [content0, content1])
bytemaskedarray = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(
# tags 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0
# index 0, 1, 1, 0, 2, 2, 4, 3, 3, 4, 3
np.array(
[True, False, False, True, False, True, True, False, False, True, True],
dtype=np.int8,
)
),
unionarray,
valid_when=True,
)
assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)
def test_toarrow_IndexedOptionArray():
ioa = ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index32([-30, 19, 6, 7, -3, 21, 13, 22, 17, 9, -12, 16]),
ak._v2.contents.NumpyArray(
np.array(
[
5.2,
1.7,
6.7,
-0.4,
4.0,
7.8,
3.8,
6.8,
4.2,
0.3,
4.6,
6.2,
6.9,
-0.7,
3.9,
1.6,
8.7,
-0.7,
3.2,
4.3,
4.0,
5.8,
4.2,
7.0,
5.6,
3.8,
]
)
),
)
assert ioa.to_arrow().to_pylist() == to_list(ioa)
def test_fromarrow_NumpyArray_1():
boolarray = ak._v2.contents.NumpyArray(
np.array([True, True, True, False, False, True, False, True, False, True])
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(boolarray.to_arrow())
) == to_list(boolarray)
def test_fromarrow_NumpyArray_2():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(content.to_arrow())) == to_list(
content
)
def test_fromarrow_ListOffsetArray():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow())
) == to_list(listoffsetarray)
def test_fromarrow_RegularArray():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow())
) == to_list(regulararray)
def test_fromarrow_RecordArray():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "chonks", "2", "wonky"],
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow())
) == to_list(recordarray)
def test_fromarrow_UnionArray():
content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout
content = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))
index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))
array = ak._v2.contents.UnionArray(tags, index, [content0, content])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(array.to_arrow())) == to_list(
array
)
def test_chunkedarray():
a = pyarrow.chunked_array(
[
pyarrow.array([1.1, 2.2, 3.3]),
pyarrow.array([], pyarrow.float64()),
pyarrow.array([4.4, 5.5]),
pyarrow.array([6.6]),
pyarrow.array([], pyarrow.float64()),
pyarrow.array([], pyarrow.float64()),
pyarrow.array([7.7, 8.8, 9.9]),
]
)
assert a.to_pylist() == [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
7.7,
8.8,
9.9,
]
def test_recordbatch():
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),
pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),
],
["a", "b"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"a": 1.1, "b": [1, 2, 3]},
{"a": 2.2, "b": []},
{"a": 3.3, "b": []},
{"a": 4.4, "b": [4, 5]},
{"a": 5.5, "b": [6]},
]
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),
pyarrow.array([[1, None, 3], [], [], [4, 5], [6]]),
],
["a", "b"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"a": 1.1, "b": [1, None, 3]},
{"a": 2.2, "b": []},
{"a": 3.3, "b": []},
{"a": None, "b": [4, 5]},
{"a": 5.5, "b": [6]},
]
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),
pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),
pyarrow.array(
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
{"x": 1, "y": 1.1},
None,
None,
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
[],
[{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
[None],
[{"x": 6, "y": 6.6}],
]
),
],
["a", "b", "c", "d", "e"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{
"a": 1.1,
"b": [1, 2, 3],
"c": {"x": 1, "y": 1.1},
"d": {"x": 1, "y": 1.1},
"e": [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
},
{"a": 2.2, "b": [], "c": {"x": 2, "y": 2.2}, "d": None, "e": []},
{
"a": 3.3,
"b": [4, 5],
"c": {"x": 3, "y": 3.3},
"d": None,
"e": [{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
},
{
"a": None,
"b": [None],
"c": {"x": 4, "y": None},
"d": {"x": 4, "y": None},
"e": [None],
},
{
"a": 5.5,
"b": [6],
"c": {"x": 5, "y": 5.5},
"d": {"x": 5, "y": 5.5},
"e": [{"x": 6, "y": 6.6}],
},
]
### All of the following tests were copied (translated) over from Awkward 0.
def test_arrow_toarrow_string():
a = ak._v2.operations.from_iter(["one", "two", "three"]).layout
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == to_list(a)
a = ak._v2.operations.from_iter(
[["one", "two", "three"], [], ["four", "five"]]
).layout
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == to_list(a)
if hasattr(pyarrow.BinaryArray, "from_buffers"):
a = ak._v2.operations.from_iter([b"one", b"two", b"three"]).layout
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
b"one",
b"two",
b"three",
]
a = ak._v2.operations.from_iter(
[[b"one", b"two", b"three"], [], [b"four", b"five"]]
).layout
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
[b"one", b"two", b"three"],
[],
[b"four", b"five"],
]
else:
a = ak._v2.operations.from_iter([b"one", b"two", b"three"]).layout
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
"one",
"two",
"three",
]
a = ak._v2.operations.from_iter(
[[b"one", b"two", b"three"], [], [b"four", b"five"]]
).layout
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
["one", "two", "three"],
[],
["four", "five"],
]
def test_arrow_array():
a = pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
1.1,
2.2,
3.3,
4.4,
5.5,
]
def test_arrow_boolean():
a = pyarrow.array([True, True, False, False, True])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
True,
True,
False,
False,
True,
]
def test_arrow_array_null():
a = pyarrow.array([1.1, 2.2, 3.3, None, 4.4, 5.5])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
1.1,
2.2,
3.3,
None,
4.4,
5.5,
]
def test_arrow_nested_array():
a = pyarrow.array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[1.1, 2.2, 3.3],
[],
[4.4, 5.5],
]
def test_arrow_nested_nested_array():
a = pyarrow.array([[[1.1, 2.2], [3.3], []], [], [[4.4, 5.5]]])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[[1.1, 2.2], [3.3], []],
[],
[[4.4, 5.5]],
]
def test_arrow_nested_array_null():
a = pyarrow.array([[1.1, 2.2, None], [], [4.4, 5.5]])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[1.1, 2.2, None],
[],
[4.4, 5.5],
]
def test_arrow_null_nested_array_null():
a = pyarrow.array([[1.1, 2.2, None], [], None, [4.4, 5.5]])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[1.1, 2.2, None],
[],
None,
[4.4, 5.5],
]
def test_arrow_chunked_array():
a = pyarrow.chunked_array(
[
pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),
pyarrow.array([], pyarrow.float64()),
pyarrow.array([6.6, 7.7, 8.8]),
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
1.1,
2.2,
3.3,
4.4,
5.5,
6.6,
7.7,
8.8,
]
def test_arrow_struct():
a = pyarrow.array([{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
]
def test_arrow_struct_null():
a = pyarrow.array([{"x": 1, "y": 1.1}, {"x": 2, "y": None}, {"x": 3, "y": 3.3}])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"x": 1, "y": 1.1},
{"x": 2, "y": None},
{"x": 3, "y": 3.3},
]
def test_arrow_null_struct():
a = pyarrow.array(
[{"x": 1, "y": 1.1}, None, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"x": 1, "y": 1.1},
None,
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
]
def test_arrow_null_struct_null():
a = pyarrow.array(
[{"x": 1, "y": 1.1}, None, {"x": 2, "y": None}, {"x": 3, "y": 3.3}]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"x": 1, "y": 1.1},
None,
{"x": 2, "y": None},
{"x": 3, "y": 3.3},
]
def test_arrow_chunked_struct():
t = pyarrow.struct({"x": pyarrow.int64(), "y": pyarrow.float64()})
a = pyarrow.chunked_array(
[
pyarrow.array(
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}], t
),
pyarrow.array([], t),
pyarrow.array([{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}], t),
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
{"x": 4, "y": 4.4},
{"x": 5, "y": 5.5},
]
def test_arrow_nested_struct():
a = pyarrow.array(
[
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
def test_arrow_nested_struct_null():
a = pyarrow.array(
[
[{"x": 1, "y": 1.1}, {"x": 2, "y": None}, {"x": 3, "y": 3.3}],
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[{"x": 1, "y": 1.1}, {"x": 2, "y": None}, {"x": 3, "y": 3.3}],
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
def test_arrow_null_nested_struct():
a = pyarrow.array(
[
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
None,
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
None,
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
def test_arrow_null_nested_struct_null():
a = pyarrow.array(
[
[{"x": 1, "y": 1.1}, {"x": 2, "y": None}, {"x": 3, "y": 3.3}],
None,
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[{"x": 1, "y": 1.1}, {"x": 2, "y": None}, {"x": 3, "y": 3.3}],
None,
[],
[{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
]
def test_arrow_struct_nested():
a = pyarrow.array(
[{"x": [], "y": 1.1}, {"x": [2], "y": 2.2}, {"x": [3, 3], "y": 3.3}]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"x": [], "y": 1.1},
{"x": [2], "y": 2.2},
{"x": [3, 3], "y": 3.3},
]
def test_arrow_struct_nested_null():
a = pyarrow.array(
[{"x": [], "y": 1.1}, {"x": [2], "y": 2.2}, {"x": [None, 3], "y": 3.3}]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{"x": [], "y": 1.1},
{"x": [2], "y": 2.2},
{"x": [None, 3], "y": 3.3},
]
def test_arrow_nested_struct_nested():
a = pyarrow.array(
[
[{"x": [], "y": 1.1}, {"x": [2], "y": 2.2}, {"x": [3, 3], "y": 3.3}],
[],
[{"x": [4, 4, 4], "y": 4.4}, {"x": [5, 5, 5, 5], "y": 5.5}],
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[{"x": [], "y": 1.1}, {"x": [2], "y": 2.2}, {"x": [3, 3], "y": 3.3}],
[],
[{"x": [4, 4, 4], "y": 4.4}, {"x": [5, 5, 5, 5], "y": 5.5}],
]
def test_arrow_null_nested_struct_nested_null():
a = pyarrow.array(
[
[{"x": [], "y": 1.1}, {"x": [2], "y": 2.2}, {"x": [None, 3], "y": 3.3}],
None,
[],
[{"x": [4, 4, 4], "y": 4.4}, {"x": [5, 5, 5, 5], "y": 5.5}],
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[{"x": [], "y": 1.1}, {"x": [2], "y": 2.2}, {"x": [None, 3], "y": 3.3}],
None,
[],
[{"x": [4, 4, 4], "y": 4.4}, {"x": [5, 5, 5, 5], "y": 5.5}],
]
def test_arrow_strings():
a = pyarrow.array(["one", "two", "three", "fo\u2014ur", "five"])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
"one",
"two",
"three",
"fo\u2014ur",
"five",
]
def test_arrow_strings_null():
a = pyarrow.array(["one", "two", None, "fo\u2014ur", "five"])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
"one",
"two",
None,
"fo\u2014ur",
"five",
]
def test_arrow_binary():
a = pyarrow.array([b"one", b"two", b"three", b"four", b"five"])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
b"one",
b"two",
b"three",
b"four",
b"five",
]
def test_arrow_binary_null():
a = pyarrow.array([b"one", b"two", None, b"four", b"five"])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
b"one",
b"two",
None,
b"four",
b"five",
]
def test_arrow_chunked_strings():
a = pyarrow.chunked_array(
[
pyarrow.array(["one", "two", "three", "four", "five"]),
pyarrow.array(["six", "seven", "eight"]),
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
]
def test_arrow_nested_strings():
a = pyarrow.array([["one", "two", "three"], [], ["four", "five"]])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
["one", "two", "three"],
[],
["four", "five"],
]
def test_arrow_nested_strings_null():
a = pyarrow.array([["one", "two", None], [], ["four", "five"]])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
["one", "two", None],
[],
["four", "five"],
]
def test_arrow_null_nested_strings_null():
a = pyarrow.array([["one", "two", None], [], None, ["four", "five"]])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
["one", "two", None],
[],
None,
["four", "five"],
]
def test_arrow_union_sparse():
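    # Sparse unions keep full-length child arrays; the int8 type codes pick
    # which child supplies each element.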
a = pyarrow.UnionArray.from_sparse(
pyarrow.array([0, 1, 0, 0, 1], type=pyarrow.int8()),
[
pyarrow.array([0.0, 1.1, 2.2, 3.3, 4.4]),
pyarrow.array([True, True, False, True, False]),
],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
0.0,
True,
2.2,
3.3,
False,
]
def test_arrow_union_sparse_null():
a = pyarrow.UnionArray.from_sparse(
pyarrow.array([0, 1, 0, 0, 1], type=pyarrow.int8()),
[
pyarrow.array([0.0, 1.1, None, 3.3, 4.4]),
pyarrow.array([True, True, False, True, False]),
],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
0.0,
True,
None,
3.3,
False,
]
def test_arrow_union_sparse_null_null():
a = pyarrow.UnionArray.from_sparse(
pyarrow.array([0, 1, 0, 0, 1], type=pyarrow.int8()),
[
pyarrow.array([0.0, 1.1, None, 3.3, 4.4]),
pyarrow.array([True, None, False, True, False]),
],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
0.0,
None,
None,
3.3,
False,
]
def test_arrow_union_dense():
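    # Dense unions add a per-element offset into the chosen child, so each
    # child stores only its own values.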
a = pyarrow.UnionArray.from_dense(
pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()),
pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()),
[pyarrow.array([0.0, 1.1, 2.2, 3.3]), pyarrow.array([True, True, False])],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
0.0,
True,
1.1,
2.2,
3.3,
True,
False,
]
def test_arrow_union_dense_null():
a = pyarrow.UnionArray.from_dense(
pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()),
pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()),
[pyarrow.array([0.0, 1.1, None, 3.3]), pyarrow.array([True, True, False])],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
0.0,
True,
1.1,
None,
3.3,
True,
False,
]
def test_arrow_union_dense_null_null():
a = pyarrow.UnionArray.from_dense(
pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()),
pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()),
[pyarrow.array([0.0, 1.1, None, 3.3]), pyarrow.array([True, None, False])],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
0.0,
True,
1.1,
None,
3.3,
None,
False,
]
def test_arrow_dictarray():
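    # DictionaryArray stores integer indices into an array of unique values.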
a = pyarrow.DictionaryArray.from_arrays(
pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),
pyarrow.array(["one", "two", "three"]),
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
"one",
"one",
"three",
"three",
"two",
"one",
"three",
"two",
"two",
]
def test_arrow_dictarray_null():
a = pyarrow.DictionaryArray.from_arrays(
pyarrow.array([0, 0, 2, None, 1, None, 2, 1, 1]),
pyarrow.array(["one", "two", "three"]),
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
"one",
"one",
"three",
None,
"two",
None,
"three",
"two",
"two",
]
def test_arrow_null_dictarray():
a = pyarrow.DictionaryArray.from_arrays(
pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),
pyarrow.array(["one", None, "three"]),
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
"one",
"one",
"three",
"three",
None,
"one",
"three",
None,
None,
]
def test_arrow_batch():
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),
pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),
pyarrow.array(
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
{"x": 1, "y": 1.1},
None,
None,
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
[],
[{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
[None],
[{"x": 6, "y": 6.6}],
]
),
],
["a", "b", "c", "d", "e"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{
"a": 1.1,
"b": [1, 2, 3],
"c": {"x": 1, "y": 1.1},
"d": {"x": 1, "y": 1.1},
"e": [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
},
{"a": 2.2, "b": [], "c": {"x": 2, "y": 2.2}, "d": None, "e": []},
{
"a": 3.3,
"b": [4, 5],
"c": {"x": 3, "y": 3.3},
"d": None,
"e": [{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
},
{
"a": None,
"b": [None],
"c": {"x": 4, "y": None},
"d": {"x": 4, "y": None},
"e": [None],
},
{
"a": 5.5,
"b": [6],
"c": {"x": 5, "y": 5.5},
"d": {"x": 5, "y": 5.5},
"e": [{"x": 6, "y": 6.6}],
},
]
def test_arrow_table():
a = pyarrow.Table.from_batches(
[
pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),
pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),
pyarrow.array(
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
{"x": 1, "y": 1.1},
None,
None,
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
],
[],
[{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
[None],
[{"x": 6, "y": 6.6}],
]
),
],
["a", "b", "c", "d", "e"],
),
pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),
pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),
pyarrow.array(
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
{"x": 1, "y": 1.1},
None,
None,
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
],
[],
[{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
[None],
[{"x": 6, "y": 6.6}],
]
),
],
["a", "b", "c", "d", "e"],
),
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
{
"a": 1.1,
"b": [1, 2, 3],
"c": {"x": 1, "y": 1.1},
"d": {"x": 1, "y": 1.1},
"e": [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
},
{"a": 2.2, "b": [], "c": {"x": 2, "y": 2.2}, "d": None, "e": []},
{
"a": 3.3,
"b": [4, 5],
"c": {"x": 3, "y": 3.3},
"d": None,
"e": [{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
},
{
"a": None,
"b": [None],
"c": {"x": 4, "y": None},
"d": {"x": 4, "y": None},
"e": [None],
},
{
"a": 5.5,
"b": [6],
"c": {"x": 5, "y": 5.5},
"d": {"x": 5, "y": 5.5},
"e": [{"x": 6, "y": 6.6}],
},
{
"a": 1.1,
"b": [1, 2, 3],
"c": {"x": 1, "y": 1.1},
"d": {"x": 1, "y": 1.1},
"e": [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
},
{"a": 2.2, "b": [], "c": {"x": 2, "y": 2.2}, "d": None, "e": []},
{
"a": 3.3,
"b": [4, 5],
"c": {"x": 3, "y": 3.3},
"d": None,
"e": [{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
},
{
"a": None,
"b": [None],
"c": {"x": 4, "y": None},
"d": {"x": 4, "y": None},
"e": [None],
},
{
"a": 5.5,
"b": [6],
"c": {"x": 5, "y": 5.5},
"d": {"x": 5, "y": 5.5},
"e": [{"x": 6, "y": 6.6}],
},
]
def test_arrow_nonnullable_table():
x = pyarrow.array([1, 2, 3])
y = pyarrow.array([1.1, 2.2, 3.3])
table = pyarrow.Table.from_arrays([x], ["x"])
if hasattr(pyarrow, "column"):
table2 = table.add_column(
1,
pyarrow.column(
pyarrow.field("y", y.type, False), np.array([1.1, 2.2, 3.3])
),
)
else:
table2 = table.add_column(1, "y", y)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(table2)) == [
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
]
def test_arrow_coverage100():
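    # Exercise the remaining to_arrow/handle_arrow branches: string and
    # bytestring parameters, byte-masked and indexed-option layouts, regular
    # arrays, and both 32- and 64-bit offsets.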
a = ak._v2.operations.from_iter(
[True, True, False, False, True, False, True, False]
).layout
assert a.to_arrow().to_pylist() == to_list(a)
a = ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellothere", "u1"), parameters={"__array__": "bytes"}
),
parameters={"__array__": "bytestring"},
)
assert a.to_arrow().to_pylist() == [b"hello", b"there"]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True, False, False, True, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellotherehellotherehellothere", "u1"),
parameters={"__array__": "bytes"},
),
parameters={"__array__": "bytestring"},
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == [
b"hello",
None,
b"hello",
b"there",
None,
None,
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellothere", "u1"), parameters={"__array__": "bytes"}
),
parameters={"__array__": "bytestring"},
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == [b"hello", None]
a = ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], "i4")),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellothere", "u1"), parameters={"__array__": "bytes"}
),
parameters={"__array__": "bytestring"},
),
)
assert a.to_arrow().to_pylist() == [
None,
b"there",
None,
b"hello",
b"hello",
None,
]
a = ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellothere", "u1"), parameters={"__array__": "chars"}
),
parameters={"__array__": "string"},
)
assert a.to_arrow().to_pylist() == ["hello", "there"]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True, False, False, True, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellotherehellotherehellothere", "u1"),
parameters={"__array__": "chars"},
),
parameters={"__array__": "string"},
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == ["hello", None, "hello", "there", None, None]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
"hello",
None,
"hello",
"there",
None,
None,
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True, False, False, True, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], "i8")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellotherehellotherehellothere", "u1"),
parameters={"__array__": "chars"},
),
parameters={"__array__": "string"},
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == ["hello", None, "hello", "there", None, None]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
"hello",
None,
"hello",
"there",
None,
None,
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True, False, False, True, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], "i8")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellotherehellotherehellothere", "u1"),
parameters={"__array__": "bytes"},
),
parameters={"__array__": "bytestring"},
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == [
b"hello",
None,
b"hello",
b"there",
None,
None,
]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
b"hello",
None,
b"hello",
b"there",
None,
None,
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellothere", "u1"), parameters={"__array__": "chars"}
),
parameters={"__array__": "string"},
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == ["hello", None]
a = ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], "i4")),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellothere", "u1"), parameters={"__array__": "chars"}
),
parameters={"__array__": "string"},
),
)
assert a.to_arrow().to_pylist() == [None, "there", None, "hello", "hello", None]
a = ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(np.frombuffer(b"hellothere", "u1")),
)
assert a.to_arrow().to_pylist() == [
[104, 101, 108, 108, 111],
[116, 104, 101, 114, 101],
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True, False, False, True, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], "i4")),
ak._v2.contents.NumpyArray(
np.frombuffer(b"hellotherehellotherehellothere", "u1")
),
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == [
[104, 101, 108, 108, 111],
None,
[104, 101, 108, 108, 111],
[116, 104, 101, 114, 101],
None,
None,
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, True])),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(np.frombuffer(b"hellothere", "u1")),
),
valid_when=False,
)
assert a.to_arrow().to_pylist() == [[104, 101, 108, 108, 111], None]
a = ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], "i4")),
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(np.frombuffer(b"hellothere", "u1")),
),
)
assert a.to_arrow().to_pylist() == [
None,
[116, 104, 101, 114, 101],
None,
[104, 101, 108, 108, 111],
[104, 101, 108, 108, 111],
None,
]
a = ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], "i4")),
ak._v2.contents.RegularArray(
ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
3,
zeros_length=0,
),
)
assert a.to_arrow().to_pylist() == [
None,
[4.4, 5.5, 6.6],
None,
[1.1, 2.2, 3.3],
[1.1, 2.2, 3.3],
None,
]
a = ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1, 1, -1], "i4")),
ak._v2.contents.RegularArray(
ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
3,
zeros_length=0,
),
)
assert a.to_arrow().to_pylist() == [
None,
[4.4, 5.5, 6.6],
None,
[1.1, 2.2, 3.3],
[1.1, 2.2, 3.3],
None,
[4.4, 5.5, 6.6],
None,
]
a = ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index64(np.array([-1, 1, -1, 0, 0, -1, 1, -1], "i8")),
ak._v2.contents.RegularArray(
ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
3,
zeros_length=0,
),
)
assert a.to_arrow().to_pylist() == [
None,
[4.4, 5.5, 6.6],
None,
[1.1, 2.2, 3.3],
[1.1, 2.2, 3.3],
None,
[4.4, 5.5, 6.6],
None,
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([True, True, True, True, False, False])),
ak._v2.contents.IndexedOptionArray(
ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], "i4")),
ak._v2.contents.RegularArray(
ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),
3,
zeros_length=0,
),
),
valid_when=True,
)
assert a.to_arrow().to_pylist() == [
None,
[4.4, 5.5, 6.6],
None,
[1.1, 2.2, 3.3],
None,
None,
]
a = ak._v2.contents.UnmaskedArray(
ak._v2.contents.ListOffsetArray(
ak._v2.index.Index32(np.array([0, 5, 10], "i4")),
ak._v2.contents.NumpyArray(np.frombuffer(b"hellothere", "u1")),
)
)
assert a.to_arrow().to_pylist() == [
[104, 101, 108, 108, 111],
[116, 104, 101, 114, 101],
]
a = pyarrow.array(
["one", "two", "three", "two", "two", "one", "three", "one"]
).dictionary_encode()
b = ak._v2._connect.pyarrow.handle_arrow(a)
assert isinstance(b, ak._v2.contents.IndexedOptionArray)
assert to_list(b) == ["one", "two", "three", "two", "two", "one", "three", "one"]
a = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], None, [4.4, 5.5]]).layout
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
[1.1, 2.2, 3.3],
[],
None,
[4.4, 5.5],
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, False, False, True, True, False, False])),
ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 999, 314, 4.4, 5.5])),
valid_when=False,
)
assert a.to_arrow().to_pylist() == [1.1, 2.2, 3.3, None, None, 4.4, 5.5]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([False, False, False, True, True, False, False])),
ak._v2.operations.from_iter(
[b"hello", b"", b"there", b"yuk", b"", b"o", b"hellothere"]
).layout,
valid_when=False,
)
assert a.to_arrow().to_pylist() == [
b"hello",
b"",
b"there",
None,
None,
b"o",
b"hellothere",
]
a = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8([True, True, False, True]),
ak._v2.operations.from_iter([[1.1, 2.2, 3.3], [], [999], [4.4, 5.5]]).layout,
valid_when=True,
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
[1.1, 2.2, 3.3],
[],
None,
[4.4, 5.5],
]
a = ak._v2.operations.from_iter([[1, 2, 3], [], [4, 5], 999, 123]).layout
assert a.to_arrow().to_pylist() == [[1, 2, 3], [], [4, 5], 999, 123]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
[1, 2, 3],
[],
[4, 5],
999,
123,
]
def test_arrow_coverage100_broken_unions():
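    # Union layouts that need special handling: masked unions, a union with an
    # empty content, and pyarrow-built sparse unions converted back.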
a = ak._v2.operations.from_iter([[1, 2, 3], [], [4, 5], 999, 123]).layout
b = ak._v2.contents.ByteMaskedArray(
ak._v2.index.Index8(np.array([True, True, False, False, True])),
a,
valid_when=True,
)
assert b.to_arrow().to_pylist() == [[1, 2, 3], [], None, None, 123]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(b.to_arrow())) == [
[1, 2, 3],
[],
None,
None,
123,
]
content1 = ak._v2.operations.from_iter([1.1, 2.2, 3.3, 4.4, 5.5]).layout
content2 = ak._v2.contents.NumpyArray(np.array([], dtype=np.int32))
a = ak._v2.contents.UnionArray(
ak._v2.index.Index8(np.array([0, 0, 0, 0, 0], "i1")),
ak._v2.index.Index32(np.array([0, 1, 2, 3, 4], "i4")),
[content1, content2],
)
assert to_list(a) == [1.1, 2.2, 3.3, 4.4, 5.5]
assert a.to_arrow().to_pylist() == [1.1, 2.2, 3.3, 4.4, 5.5]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [
1.1,
2.2,
3.3,
4.4,
5.5,
]
a = pyarrow.UnionArray.from_sparse(
pyarrow.array([0, 0, 0, 0, 0], type=pyarrow.int8()),
[
pyarrow.array([0.0, 1.1, None, 3.3, 4.4]),
pyarrow.array([True, None, False, True, False]),
],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
0.0,
1.1,
None,
3.3,
4.4,
]
a = pyarrow.UnionArray.from_sparse(
pyarrow.array([0, 1, 0, 1, 1], "i1"),
[
pyarrow.array([[0.0, 1.1, 2.2], [], None, [5.5], [6.6, 7.7, 8.8, 9.9]]),
pyarrow.array([0.0, 1.1, 2.2, None, None]),
],
["0", "1"],
[0, 1],
)
assert a.to_pylist() == [[0.0, 1.1, 2.2], 1.1, None, None, None]
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
[0.0, 1.1, 2.2],
1.1,
None,
None,
None,
]
a = pyarrow.chunked_array([pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5])])
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [
1.1,
2.2,
3.3,
4.4,
5.5,
]
# NumpyArrays in Awkward Arrays translate to their corresponding DataType Arrays in Arrow
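# Slicing a pyarrow.Array is zero-copy: it only shifts the array's offset, so
# handle_arrow must honor non-zero offsets in every layout tested below.
# For example (assuming pyarrow is imported as in these tests):
#     arr = pyarrow.array([1, 2, 3, 4])[1:]   # arr.offset == 1, no copy made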
def test_nonzero_offset_fromarrow_NumpyArray_1():
boolarray = ak._v2.contents.NumpyArray(
np.array([True, True, True, False, False, True, False, True, False, True])
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(boolarray.to_arrow()[5:])
) == pyarrow.Array.to_pylist(boolarray.to_arrow()[5:])
def test_nonzero_offset_fromarrow_NumpyArray_2():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[2:])
) == pyarrow.Array.to_pylist(content.to_arrow()[2:])
def test_nonzero_offset_fromarrow_NumpyArray_3():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[2:5])
) == pyarrow.Array.to_pylist(content.to_arrow()[2:5])
def test_nonzero_offset_fromarrow_NumpyArray_4():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[0:9:2])
) == pyarrow.Array.to_pylist(content.to_arrow()[0:9:2])
def test_nonzero_offset_fromarrow_NumpyArray_5():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[-2:10])
) == pyarrow.Array.to_pylist(content.to_arrow()[-2:10])
def test_nonzero_offset_fromarrow_NumpyArray_6():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[-3:3:-1])
) == pyarrow.Array.to_pylist(content.to_arrow()[-3:3:-1])
# ListOffsetArrays in Awkward Arrays translate to ListArrays in Arrow
def test_nonzero_offset_fromarrow_ListOffsetArray_1():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[2:])
) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[2:])
def test_nonzero_offset_fromarrow_ListOffsetArray_2():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[2:5])
) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[2:5])
def test_nonzero_offset_fromarrow_ListOffsetArray_3():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[0:5:2])
) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[0:5:2])
def test_nonzero_offset_fromarrow_ListOffsetArray_4():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[-3:3:-1])
) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[-3:3:-1])
# RegularArrays in Awkward Arrays translate to ListArrays in Arrow
def test_nonzero_offset_fromarrow_RegularArray_1():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[2:])
) == pyarrow.Array.to_pylist(regulararray.to_arrow()[2:])
def test_nonzero_offset_fromarrow_RegularArray_2():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[2:5])
) == pyarrow.Array.to_pylist(regulararray.to_arrow()[2:5])
def test_nonzero_offset_fromarrow_RegularArray_3():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[0:5:2])
) == pyarrow.Array.to_pylist(regulararray.to_arrow()[0:5:2])
def test_nonzero_offset_fromarrow_RegularArray_4():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[-3:3:-1])
) == pyarrow.Array.to_pylist(regulararray.to_arrow()[-3:3:-1])
# RecordArrays in Awkward Arrays translate to Struct Arrays in Arrow
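# (struct field names need not be identifiers; "2" and "wonky" below survive the round trip)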
def test_nonzero_offset_fromarrow_RecordArray_1():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "chonks", "2", "wonky"],
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[2:])
) == pyarrow.Array.to_pylist(recordarray.to_arrow()[2:])
def test_nonzero_offset_fromarrow_RecordArray_2():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "chonks", "2", "wonky"],
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[2:5])
) == pyarrow.Array.to_pylist(recordarray.to_arrow()[2:5])
def test_nonzero_offset_fromarrow_RecordArray_3():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "chonks", "2", "wonky"],
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[0:5:2])
) == pyarrow.Array.to_pylist(recordarray.to_arrow()[0:5:2])
def test_nonzero_offset_fromarrow_RecordArray_4():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "chonks", "2", "wonky"],
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[-3:3:-1])
) == pyarrow.Array.to_pylist(recordarray.to_arrow()[-3:3:-1])
def test_nonzero_offset_fromarrow_RecordArray_4_again():
content = ak._v2.contents.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)
content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak._v2.contents.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak._v2.contents.RecordArray(
[content1, listoffsetarray, content2, content1],
fields=["one", "chonks", "2", "wonky"],
)
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[-3:3:-1])
) == pyarrow.Array.to_pylist(recordarray.to_arrow()[-3:3:-1])
| ).layout
tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))
index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))
array = ak._v2.contents.UnionArray(tags, index, [content0, content])
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(array.to_arrow()[2:])
) == pyarrow.Array.to_pylist(array.to_arrow()[2:])
def test_nonzero_offset_fromarrow_UnionArray_2():
content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout
content = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))
index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))
array = ak._v2.contents.UnionArray(tags, index, [content0, content])
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(array.to_arrow()[2:5])
) == pyarrow.Array.to_pylist(array.to_arrow()[2:5])
def test_nonzero_offset_fromarrow_UnionArray_3():
content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout
content = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))
index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))
array = ak._v2.contents.UnionArray(tags, index, [content0, content])
assert to_list(
ak._v2._connect.pyarrow.handle_arrow(array.to_arrow()[0:5:1])
) == pyarrow.Array.to_pylist(array.to_arrow()[0:5:1])
def test_nonzero_offset_fromarrow_ArrowDictionaryArray_1():
a = pyarrow.DictionaryArray.from_arrays(
pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),
pyarrow.array(["one", None, "three"]),
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:])) == [
"three",
"three",
None,
"one",
"three",
None,
None,
]
def test_nonzero_offset_fromarrow_ArrowDictionaryArray_2():
a = pyarrow.DictionaryArray.from_arrays(
pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),
pyarrow.array(["one", None, "three"]),
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:5])) == [
"three",
"three",
None,
]
def test_nonzero_offset_fromarrow_ArrowDictionaryArray_3():
a = pyarrow.DictionaryArray.from_arrays(
pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),
pyarrow.array(["one", None, "three"]),
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0:8:2])) == [
"one",
"three",
None,
"three",
]
def test_nonzero_offset_fromarrow_ArrowDictionaryArray_4():
a = pyarrow.DictionaryArray.from_arrays(
pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),
pyarrow.array(["one", None, "three"]),
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[-3:3:-1])) == [
"three",
"one",
None,
]
def test_nonzero_offset_fromarrow_ArrowRecordBatch_1():
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),
pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),
],
["a", "b"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0])) == a[0].to_pylist()
def test_nonzero_offset_fromarrow_ArrowRecordBatch_2():
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),
pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),
],
["a", "b"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:])) == [
{"a": 3.3, "b": []},
{"a": 4.4, "b": [4, 5]},
{"a": 5.5, "b": [6]},
]
def test_nonzero_offset_fromarrow_ArrowRecordBatch_3():
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),
pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),
],
["a", "b"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:5])) == [
{"a": 3.3, "b": []},
{"a": 4.4, "b": [4, 5]},
{"a": 5.5, "b": [6]},
]
def test_nonzero_offset_fromarrow_ArrowRecordBatch_4():
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),
pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),
],
["a", "b"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0:5:2])) == [
{"a": 1.1, "b": [1, 2, 3]},
{"a": 3.3, "b": []},
{"a": 5.5, "b": [6]},
]
def test_nonzero_offset_fromarrow_ArrowRecordBatch_4_again():
a = pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),
pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),
],
["a", "b"],
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[-2:0:-1])) == [
{"a": 4.4, "b": [4, 5]},
{"a": 3.3, "b": []},
{"a": 2.2, "b": []},
]
def test_nonzero_offset_fromarrow_ArrowTable_1():
a = pyarrow.Table.from_batches(
[
pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),
pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),
pyarrow.array(
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
{"x": 1, "y": 1.1},
None,
None,
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
],
[],
[{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
[None],
[{"x": 6, "y": 6.6}],
]
),
],
["a", "b", "c", "d", "e"],
),
pyarrow.RecordBatch.from_arrays(
[
pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),
pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),
pyarrow.array(
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
{"x": 1, "y": 1.1},
None,
None,
{"x": 4, "y": None},
{"x": 5, "y": 5.5},
]
),
pyarrow.array(
[
[
{"x": 1, "y": 1.1},
{"x": 2, "y": 2.2},
{"x": 3, "y": 3.3},
],
[],
[{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
[None],
[{"x": 6, "y": 6.6}],
]
),
],
["a", "b", "c", "d", "e"],
),
]
)
assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0:5:2])) == [
{
"a": 1.1,
"b": [1, 2, 3],
"c": {"x": 1, "y": 1.1},
"d": {"x": 1, "y": 1.1},
"e": [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
},
{
"a": 3.3,
"b": [4, 5],
"c": {"x": 3, "y": 3.3},
"d": None,
"e": [{"x": 4, "y": None}, {"x": 5, "y": 5.5}],
},
{
"a": 5.5,
"b": [6],
"c": {"x": 5, "y": 5.5},
"d": {"x": 5, "y": 5.5},
"e": [{"x": 6, "y": 6.6}],
},
] | def test_nonzero_offset_fromarrow_UnionArray_1():
content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout
content = ak._v2.highlevel.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] |
dispatcher.rs | use std::{
collections::VecDeque,
fmt,
future::Future,
io, mem, net,
pin::Pin,
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed, FramedParts};
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_service::Service;
use bitflags::bitflags;
use bytes::{Buf, BytesMut};
use log::{error, trace};
use pin_project::pin_project;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error};
use crate::error::{ParseError, PayloadError};
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpFlow;
use crate::OnConnectData;
use super::codec::Codec;
use super::payload::{Payload, PayloadSender, PayloadStatus};
use super::{Message, MessageType};
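// Buffer sizing constants and the cap on pipelined requests queued before
// the dispatcher stops reading new messages.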
const LW_BUFFER_SIZE: usize = 1024;
const HW_BUFFER_SIZE: usize = 1024 * 8;
const MAX_PIPELINED_MESSAGES: usize = 16;
bitflags! {
pub struct Flags: u8 {
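        // STARTED: at least one request has begun decoding on this connection.
        // KEEPALIVE: the connection may be kept open after the response.
        // SHUTDOWN: the dispatcher is shutting the connection down.
        // READ_/WRITE_DISCONNECT: the corresponding half of the stream is gone.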
const STARTED = 0b0000_0001;
const KEEPALIVE = 0b0000_0010;
const SHUTDOWN = 0b0000_0100;
const READ_DISCONNECT = 0b0000_1000;
const WRITE_DISCONNECT = 0b0001_0000;
}
}
#[pin_project]
/// Dispatcher for HTTP/1.1 protocol
pub struct Dispatcher<T, S, B, X, U>
where
S: Service<Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Error>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
#[pin]
inner: DispatcherState<T, S, B, X, U>,
#[cfg(test)]
poll_count: u64,
}
#[pin_project(project = DispatcherStateProj)]
enum DispatcherState<T, S, B, X, U>
where
S: Service<Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Error>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
Normal(#[pin] InnerDispatcher<T, S, B, X, U>),
Upgrade(#[pin] U::Future),
}
#[pin_project(project = InnerDispatcherProj)]
struct InnerDispatcher<T, S, B, X, U>
where
S: Service<Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Error>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
flow: Rc<HttpFlow<S, X, U>>,
on_connect_data: OnConnectData,
flags: Flags,
peer_addr: Option<net::SocketAddr>,
error: Option<DispatchError>,
#[pin]
state: State<S, B, X>,
payload: Option<PayloadSender>,
messages: VecDeque<DispatcherMessage>,
ka_expire: Instant,
#[pin]
ka_timer: Option<Sleep>,
io: Option<T>,
read_buf: BytesMut,
write_buf: BytesMut,
codec: Codec,
}
enum DispatcherMessage {
Item(Request),
Upgrade(Request),
Error(Response<()>),
}
#[pin_project(project = StateProj)]
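// Per-request state machine: idle (None), awaiting the expect service,
// awaiting the main service, or streaming the response body.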
enum State<S, B, X>
where
S: Service<Request>,
X: Service<Request, Response = Request>,
B: MessageBody,
{
None,
ExpectCall(#[pin] X::Future),
ServiceCall(#[pin] S::Future),
SendPayload(#[pin] ResponseBody<B>),
}
impl<S, B, X> State<S, B, X>
where
S: Service<Request>,
X: Service<Request, Response = Request>,
B: MessageBody,
{
fn is_empty(&self) -> bool {
matches!(self, State::None)
}
}
enum PollResponse {
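    // Upgrade: hand the request (and connection) to the upgrade service.
    // DoNothing: no further progress is possible until a new event arrives.
    // DrainWriteBuf: the write buffer is full and must be flushed first.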
Upgrade(Request),
DoNothing,
DrainWriteBuf,
}
impl<T, S, B, X, U> Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Error>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
/// Create HTTP/1 dispatcher.
pub(crate) fn new(
io: T,
config: ServiceConfig,
flow: Rc<HttpFlow<S, X, U>>,
on_connect_data: OnConnectData,
peer_addr: Option<net::SocketAddr>,
) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE
} else {
Flags::empty()
};
// keep-alive timer
let (ka_expire, ka_timer) = match config.keep_alive_timer() {
Some(delay) => (delay.deadline(), Some(delay)),
None => (config.now(), None),
};
Dispatcher {
inner: DispatcherState::Normal(InnerDispatcher {
read_buf: BytesMut::with_capacity(HW_BUFFER_SIZE),
write_buf: BytesMut::with_capacity(HW_BUFFER_SIZE),
payload: None,
state: State::None,
error: None,
messages: VecDeque::new(),
io: Some(io),
codec: Codec::new(config),
flow,
on_connect_data,
flags,
peer_addr,
ka_expire,
ka_timer,
}),
#[cfg(test)]
poll_count: 0,
}
}
}
impl<T, S, B, X, U> InnerDispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Error>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn can_read(&self, cx: &mut Context<'_>) -> bool {
if self.flags.contains(Flags::READ_DISCONNECT) {
false
} else if let Some(ref info) = self.payload {
info.need_read(cx) == PayloadStatus::Read
} else {
true
}
}
    // Mark both halves of the stream as disconnected and propagate the error
    // to any in-flight payload.
fn client_disconnected(self: Pin<&mut Self>) {
let this = self.project();
this.flags
.insert(Flags::READ_DISCONNECT | Flags::WRITE_DISCONNECT);
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
}
    /// Flush the write buffer to the underlying io stream.
    ///
    /// Returns `true` if the write hit `WouldBlock`, `false` otherwise.
fn poll_flush(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
let InnerDispatcherProj { io, write_buf, .. } = self.project();
let mut io = Pin::new(io.as_mut().unwrap());
let len = write_buf.len();
let mut written = 0;
while written < len {
match io.as_mut().poll_write(cx, &write_buf[written..]) {
Poll::Ready(Ok(0)) => {
return Err(DispatchError::Io(io::Error::new(
io::ErrorKind::WriteZero,
"",
)))
}
Poll::Ready(Ok(n)) => written += n,
Poll::Pending => {
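                    // Socket not writable: keep only the unwritten tail of the
                    // buffer and report that the write would block.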
write_buf.advance(written);
return Ok(true);
}
Poll::Ready(Err(err)) => return Err(DispatchError::Io(err)),
}
}
        // everything has been written to io; clear the buffer.
        write_buf.clear();
        // flush the io and check whether it would block.
let blocked = io.poll_flush(cx)?.is_pending();
Ok(blocked)
}
fn send_response(
self: Pin<&mut Self>,
message: Response<()>,
body: ResponseBody<B>,
) -> Result<(), DispatchError> {
let size = body.size();
let mut this = self.project();
this.codec
.encode(Message::Item((message, size)), &mut this.write_buf)
.map_err(|err| {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
DispatchError::Io(err)
})?;
this.flags.set(Flags::KEEPALIVE, this.codec.keepalive());
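        // An empty body completes the response immediately; otherwise the
        // payload is streamed out in the SendPayload state.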
match size {
BodySize::None | BodySize::Empty => this.state.set(State::None),
_ => this.state.set(State::SendPayload(body)),
};
Ok(())
}
fn send_continue(self: Pin<&mut Self>) {
self.project()
.write_buf
.extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n");
}
fn poll_response(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<PollResponse, DispatchError> {
'res: loop {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
// no future is in InnerDispatcher state. pop next message.
StateProj::None => match this.messages.pop_front() {
// handle request message.
Some(DispatcherMessage::Item(req)) => {
// Handle `EXPECT: 100-Continue` header
if req.head().expect() {
// set InnerDispatcher state and continue loop to poll it.
let task = this.flow.expect.call(req);
this.state.set(State::ExpectCall(task));
} else {
// the same as expect call.
let task = this.flow.service.call(req);
this.state.set(State::ServiceCall(task));
};
}
// handle error message.
Some(DispatcherMessage::Error(res)) => {
                    // send_response updates the InnerDispatcher state to SendPayload
                    // or None (if the response body is empty); continue the loop to
                    // poll it.
self.as_mut()
.send_response(res, ResponseBody::Other(Body::Empty))?;
}
// return with upgrade request and poll it exclusively.
Some(DispatcherMessage::Upgrade(req)) => {
return Ok(PollResponse::Upgrade(req));
}
// all messages are dealt with.
None => return Ok(PollResponse::DoNothing),
},
StateProj::ServiceCall(fut) => match fut.poll(cx) {
// service call resolved. send response.
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.as_mut().send_response(res, body)?;
}
// send service call error as response
Poll::Ready(Err(err)) => {
let res: Response = err.into().into();
let (res, body) = res.replace_body(());
self.as_mut().send_response(res, body.into_body())?;
}
// service call pending and could be waiting for more chunk messages.
// (pipeline message limit and/or payload can_read limit)
Poll::Pending => {
                        // no new message was decoded and no new payload was fed:
                        // nothing to do except wait for more data from the client.
if !self.as_mut().poll_request(cx)? {
return Ok(PollResponse::DoNothing);
}
                        // otherwise keep looping.
}
},
StateProj::SendPayload(mut stream) => {
                    // keep populating the write buffer until the size limit is hit,
                    // the stream blocks, or the payload is finished.
while this.write_buf.len() < super::payload::MAX_BUFFER_SIZE {
match stream.as_mut().poll_next(cx) {
Poll::Ready(Some(Ok(item))) => {
this.codec.encode(
Message::Chunk(Some(item)),
&mut this.write_buf,
)?;
}
Poll::Ready(None) => {
this.codec
.encode(Message::Chunk(None), &mut this.write_buf)?;
// payload stream finished.
// set state to None and handle next message
this.state.set(State::None);
continue 'res;
}
Poll::Ready(Some(Err(err))) => {
return Err(DispatchError::Service(err))
}
Poll::Pending => return Ok(PollResponse::DoNothing),
}
}
// buffer is beyond max size.
// return and try to write the whole buffer to io stream.
return Ok(PollResponse::DrainWriteBuf);
}
StateProj::ExpectCall(fut) => match fut.poll(cx) {
// expect resolved. write continue to buffer and set InnerDispatcher state
// to service call.
Poll::Ready(Ok(req)) => {
this.write_buf
.extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n");
let fut = this.flow.service.call(req);
this.state.set(State::ServiceCall(fut));
}
// send expect error as response
Poll::Ready(Err(err)) => {
let res: Response = err.into().into();
let (res, body) = res.replace_body(());
self.as_mut().send_response(res, body.into_body())?;
}
                    // expect must be resolved before progress can be made.
Poll::Pending => return Ok(PollResponse::DoNothing),
},
}
}
}
fn handle_request(
mut self: Pin<&mut Self>,
req: Request,
cx: &mut Context<'_>,
) -> Result<(), DispatchError> {
// Handle `EXPECT: 100-Continue` header
if req.head().expect() {
// set dispatcher state so the future is pinned.
let mut this = self.as_mut().project();
let task = this.flow.expect.call(req);
this.state.set(State::ExpectCall(task));
} else {
// the same as above.
let mut this = self.as_mut().project();
let task = this.flow.service.call(req);
this.state.set(State::ServiceCall(task));
};
        // eagerly poll the future once (or twice if the expect call resolves immediately).
loop {
match self.as_mut().project().state.project() {
StateProj::ExpectCall(fut) => {
match fut.poll(cx) {
// expect is resolved. continue loop and poll the service call branch.
Poll::Ready(Ok(req)) => {
self.as_mut().send_continue();
let mut this = self.as_mut().project();
let task = this.flow.service.call(req);
this.state.set(State::ServiceCall(task));
continue;
}
                        // the future is pending: return Ok(()) to signal that a new
                        // state is set and the outer loop should continue.
                        Poll::Pending => return Ok(()),
                        // the future errored: send the error response and return the
                        // result; Ok notifies the dispatcher that a new state is set
                        // and the outer loop should continue.
Poll::Ready(Err(err)) => {
let err = err.into();
let res: Response = err.into();
let (res, body) = res.replace_body(());
return self.send_response(res, body.into_body());
}
}
}
StateProj::ServiceCall(fut) => {
                    // return regardless of the service call future's result.
return match fut.poll(cx) {
                        // the future resolved: send the response and return the
                        // result; Ok notifies the dispatcher that a new state is set
                        // and the outer loop should continue.
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.send_response(res, body)
}
// see the comment on ExpectCall state branch's Pending.
Poll::Pending => Ok(()),
// see the comment on ExpectCall state branch's Ready(Err(err)).
Poll::Ready(Err(err)) => {
let res: Response = err.into().into();
let (res, body) = res.replace_body(());
self.send_response(res, body.into_body())
}
};
}
                _ => unreachable!(
                    "State must be set to ServiceCall or ExpectCall in handle_request"
                ),
}
}
}
/// Process one incoming request.
fn poll_request(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
        // limit the number of unprocessed requests
if self.messages.len() >= MAX_PIPELINED_MESSAGES || !self.can_read(cx) {
return Ok(false);
}
let mut updated = false;
let mut this = self.as_mut().project();
loop {
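            // Decode as many complete messages from the read buffer as possible.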
match this.codec.decode(&mut this.read_buf) {
Ok(Some(msg)) => {
updated = true;
this.flags.insert(Flags::STARTED);
match msg {
Message::Item(mut req) => {
let pl = this.codec.message_type();
req.head_mut().peer_addr = *this.peer_addr;
// merge on_connect_ext data into request extensions
this.on_connect_data.merge_into(&mut req);
if pl == MessageType::Stream && this.flow.upgrade.is_some() {
this.messages.push_back(DispatcherMessage::Upgrade(req));
break;
}
if pl == MessageType::Payload || pl == MessageType::Stream {
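                            // Attach a payload channel so decoded chunks can be
                            // fed to the request's payload stream.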
let (ps, pl) = Payload::create(false);
let (req1, _) =
req.replace_payload(crate::Payload::H1(pl));
req = req1;
*this.payload = Some(ps);
}
// handle request early
if this.state.is_empty() {
self.as_mut().handle_request(req, cx)?;
this = self.as_mut().project();
} else {
this.messages.push_back(DispatcherMessage::Item(req));
}
}
Message::Chunk(Some(chunk)) => {
if let Some(ref mut payload) = this.payload {
payload.feed_data(chunk);
} else {
error!(
"Internal server error: unexpected payload chunk"
);
this.flags.insert(Flags::READ_DISCONNECT);
this.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
*this.error = Some(DispatchError::InternalError);
break;
}
}
Message::Chunk(None) => {
if let Some(mut payload) = this.payload.take() {
payload.feed_eof();
} else {
error!("Internal server error: unexpected eof");
this.flags.insert(Flags::READ_DISCONNECT);
this.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
*this.error = Some(DispatchError::InternalError);
break;
}
}
}
}
                // the decode is partial and the buffer is not full yet;
                // break and wait for more data to read.
Ok(None) => break,
Err(ParseError::Io(err)) => {
self.as_mut().client_disconnected();
this = self.as_mut().project();
*this.error = Some(DispatchError::Io(err));
break;
}
Err(ParseError::TooLarge) => {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Overflow);
}
                    // Requests that overflow the buffer size are answered with 431
this.messages.push_back(DispatcherMessage::Error(
Response::RequestHeaderFieldsTooLarge().finish().drop_body(),
));
this.flags.insert(Flags::READ_DISCONNECT);
*this.error = Some(ParseError::TooLarge.into());
break;
}
Err(err) => {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::EncodingCorrupted);
}
                    // Malformed requests are answered with 400
this.messages.push_back(DispatcherMessage::Error(
Response::BadRequest().finish().drop_body(),
));
this.flags.insert(Flags::READ_DISCONNECT);
*this.error = Some(err.into());
break;
}
}
}
if updated && this.ka_timer.is_some() {
if let Some(expire) = this.codec.config().keep_alive_expire() {
*this.ka_expire = expire;
}
}
Ok(updated)
}
/// keep-alive timer
fn poll_keepalive(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<(), DispatchError> {
let mut this = self.as_mut().project();
        // when a branch does not explicitly return early, it's meant to fall
        // through and return Ok(())
match this.ka_timer.as_mut().as_pin_mut() {
None => {
// conditionally go into shutdown timeout
if this.flags.contains(Flags::SHUTDOWN) {
if let Some(deadline) = this.codec.config().client_disconnect_timer()
{
                        // set the client disconnect timeout and poll again to
                        // go into the Some<Pin<&mut Sleep>> branch
this.ka_timer.set(Some(sleep_until(deadline)));
return self.poll_keepalive(cx);
} else {
this.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
}
}
}
Some(mut timer) => {
// only operate when keep-alive timer is resolved.
if timer.as_mut().poll(cx).is_ready() {
// got timeout during shutdown, drop connection
if this.flags.contains(Flags::SHUTDOWN) {
return Err(DispatchError::DisconnectTimeout);
                    // deadline exceeded; check for any outstanding tasks
} else if timer.deadline() >= *this.ka_expire {
// have no task at hand.
if this.state.is_empty() && this.write_buf.is_empty() {
if this.flags.contains(Flags::STARTED) {
trace!("Keep-alive timeout, close connection");
this.flags.insert(Flags::SHUTDOWN);
// start shutdown timeout
if let Some(deadline) =
this.codec.config().client_disconnect_timer()
{
timer.as_mut().reset(deadline);
let _ = timer.poll(cx);
} else {
// no shutdown timeout, drop socket
this.flags.insert(Flags::WRITE_DISCONNECT);
}
} else {
                            // timeout on first request (slow request): return 408
if !this.flags.contains(Flags::STARTED) {
trace!("Slow request timeout");
let _ = self.as_mut().send_response(
Response::RequestTimeout().finish().drop_body(),
ResponseBody::Other(Body::Empty),
);
this = self.project();
} else {
trace!("Keep-alive connection timeout");
}
this.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
this.state.set(State::None);
}
// still have unfinished task. try to reset and register keep-alive.
} else if let Some(deadline) =
this.codec.config().keep_alive_expire()
{
timer.as_mut().reset(deadline);
let _ = timer.poll(cx);
}
                // timer resolved but the keep-alive expire deadline has not been met yet.
// reset and register for later wakeup.
} else {
timer.as_mut().reset(*this.ka_expire);
let _ = timer.poll(cx);
}
}
}
}
Ok(())
}
/// Returns true when io stream can be disconnected after write to it.
///
/// It covers these conditions:
///
/// - `std::io::ErrorKind::ConnectionReset` after partial read.
/// - all data read done.
#[inline(always)]
fn | (
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
let this = self.project();
if this.flags.contains(Flags::READ_DISCONNECT) {
return Ok(false);
};
let mut io = Pin::new(this.io.as_mut().unwrap());
let mut read_some = false;
loop {
            // Return early when read buf exceeds decoder's max buffer size.
if this.read_buf.len() >= super::decoder::MAX_BUFFER_SIZE {
            /*
              At this point it's not known whether the IO stream is still
              scheduled to be woken up, so force-wake the dispatcher just in
              case.
              Reason:
              AsyncRead generally only guarantees a wakeup when poll_read
              returns Poll::Pending.
              Case:
              When read_buf is already beyond the max buffer size, the data
              behind this early return could still be parsed as a new Request.
              That case would not generate ParseError::TooLarge, the IO stream
              would not have been read to Pending, and the dispatcher would be
              stuck until the keep-alive timeout.
              Note:
              This is a perf choice to reduce branching in
              <Request as MessageType>::decode. A Request head too large to
              parse is only checked on the httparse::Status::Partial condition.
            */
if this.payload.is_none() {
                /*
                  When the dispatcher has a payload, the responsibility for
                  waking it up shifts to h1::payload::Payload.
                  Reason:
                  Self-waking while there is a payload would waste polls and/or
                  result in over-reading.
                  Case:
                  When the payload is (partially) dropped by the user there is
                  no need to read anymore. In that case read_buf could remain
                  beyond MAX_BUFFER_SIZE indefinitely, and self-waking would
                  busy-poll the dispatcher and waste resources.
                */
cx.waker().wake_by_ref();
}
return Ok(false);
}
// grow buffer if necessary.
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW_BUFFER_SIZE {
this.read_buf.reserve(HW_BUFFER_SIZE - remaining);
}
match actix_codec::poll_read_buf(io.as_mut(), cx, this.read_buf) {
Poll::Ready(Ok(n)) => {
if n == 0 {
return Ok(true);
}
read_some = true;
}
Poll::Pending => return Ok(false),
Poll::Ready(Err(err)) => {
return match err.kind() {
io::ErrorKind::WouldBlock => Ok(false),
io::ErrorKind::ConnectionReset if read_some => Ok(true),
_ => Err(DispatchError::Io(err)),
}
}
}
}
}
/// call upgrade service with request.
fn upgrade(self: Pin<&mut Self>, req: Request) -> U::Future {
let this = self.project();
let mut parts = FramedParts::with_read_buf(
this.io.take().unwrap(),
mem::take(this.codec),
mem::take(this.read_buf),
);
parts.write_buf = mem::take(this.write_buf);
let framed = Framed::from_parts(parts);
this.flow.upgrade.as_ref().unwrap().call((req, framed))
}
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Error>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
type Output = Result<(), DispatchError>;
#[inline]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
#[cfg(test)]
{
*this.poll_count += 1;
}
match this.inner.project() {
DispatcherStateProj::Normal(mut inner) => {
inner.as_mut().poll_keepalive(cx)?;
if inner.flags.contains(Flags::SHUTDOWN) {
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
Poll::Ready(Ok(()))
} else {
// flush buffer and wait on block.
if inner.as_mut().poll_flush(cx)? {
Poll::Pending
} else {
Pin::new(inner.project().io.as_mut().unwrap())
.poll_shutdown(cx)
.map_err(DispatchError::from)
}
}
} else {
// read from io stream and fill read buffer.
let should_disconnect = inner.as_mut().read_available(cx)?;
inner.as_mut().poll_request(cx)?;
                // io stream should be closed.
if should_disconnect {
let inner = inner.as_mut().project();
inner.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = inner.payload.take() {
payload.feed_eof();
}
};
loop {
// poll_response and populate write buffer.
                    // drain indicates whether the write buffer should be emptied before the next run.
let drain = match inner.as_mut().poll_response(cx)? {
PollResponse::DrainWriteBuf => true,
PollResponse::DoNothing => false,
                        // upgrade the request and go to the Upgrade variant of DispatcherState.
PollResponse::Upgrade(req) => {
let upgrade = inner.upgrade(req);
self.as_mut()
.project()
.inner
.set(DispatcherState::Upgrade(upgrade));
return self.poll(cx);
}
};
                    // we didn't get WouldBlock from the write operation,
                    // so data got written to the kernel completely (macOS)
                    // and we have to write again, otherwise the response can get stuck
//
// TODO: what? is WouldBlock good or bad?
// want to find a reference for this macOS behavior
if inner.as_mut().poll_flush(cx)? || !drain {
break;
}
}
// client is gone
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
return Poll::Ready(Ok(()));
}
let is_empty = inner.state.is_empty();
let inner_p = inner.as_mut().project();
                // read half is closed and we are not processing any responses
if inner_p.flags.contains(Flags::READ_DISCONNECT) && is_empty {
inner_p.flags.insert(Flags::SHUTDOWN);
}
// keep-alive and stream errors
if is_empty && inner_p.write_buf.is_empty() {
if let Some(err) = inner_p.error.take() {
Poll::Ready(Err(err))
}
// disconnect if keep-alive is not enabled
else if inner_p.flags.contains(Flags::STARTED)
&& !inner_p.flags.intersects(Flags::KEEPALIVE)
{
inner_p.flags.insert(Flags::SHUTDOWN);
self.poll(cx)
}
// disconnect if shutdown
else if inner_p.flags.contains(Flags::SHUTDOWN) {
self.poll(cx)
} else {
Poll::Pending
}
} else {
Poll::Pending
}
}
}
DispatcherStateProj::Upgrade(fut) => fut.poll(cx).map_err(|e| {
error!("Upgrade handler error: {}", e);
DispatchError::Upgrade
}),
}
}
}
#[cfg(test)]
mod tests {
use std::str;
use actix_service::fn_service;
use futures_util::future::{lazy, ready};
use super::*;
use crate::test::TestBuffer;
use crate::{error::Error, KeepAlive};
use crate::{
h1::{ExpectHandler, UpgradeHandler},
test::TestSeqBuffer,
};
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
haystack[from..]
.windows(needle.len())
.position(|window| window == needle)
}
fn stabilize_date_header(payload: &mut [u8]) {
let mut from = 0;
while let Some(pos) = find_slice(&payload, b"date", from) {
payload[(from + pos)..(from + pos + 35)]
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
from += 35;
}
}
fn ok_service() -> impl Service<Request, Response = Response, Error = Error> {
fn_service(|_req: Request| ready(Ok::<_, Error>(Response::Ok().finish())))
}
fn echo_path_service() -> impl Service<Request, Response = Response, Error = Error> {
fn_service(|req: Request| {
let path = req.path().as_bytes();
ready(Ok::<_, Error>(Response::Ok().body(Body::from_slice(path))))
})
}
fn echo_payload_service() -> impl Service<Request, Response = Response, Error = Error>
{
fn_service(|mut req: Request| {
Box::pin(async move {
use futures_util::stream::StreamExt as _;
let mut pl = req.take_payload();
let mut body = BytesMut::new();
while let Some(chunk) = pl.next().await {
body.extend_from_slice(chunk.unwrap().chunk())
}
Ok::<_, Error>(Response::Ok().body(body))
})
})
}
#[actix_rt::test]
async fn test_req_parse_err() {
lazy(|cx| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf,
ServiceConfig::default(),
services,
OnConnectData::default(),
None,
);
actix_rt::pin!(h1);
match h1.as_mut().poll(cx) {
Poll::Pending => panic!(),
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherStateProj::Normal(inner) = h1.project().inner.project() {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(
&inner.project().io.take().unwrap().write_buf[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;
}
#[actix_rt::test]
async fn test_pipelining() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1.1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(KeepAlive::Disabled, 1, 1, false, None);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf,
cfg,
services,
OnConnectData::default(),
None,
);
actix_rt::pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal(inner) = h1.project().inner.project() {
let res = &mut inner.project().io.take().unwrap().write_buf[..];
stabilize_date_header(res);
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/def\
";
assert_eq!(res.to_vec(), exp.to_vec());
}
})
.await;
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(KeepAlive::Disabled, 1, 1, false, None);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf,
cfg,
services,
OnConnectData::default(),
None,
);
actix_rt::pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_err()),
}
            // polls: initial
assert_eq!(h1.poll_count, 1);
if let DispatcherStateProj::Normal(inner) = h1.project().inner.project() {
let res = &mut inner.project().io.take().unwrap().write_buf[..];
stabilize_date_header(res);
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 400 Bad Request\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(res.to_vec(), exp.to_vec());
}
})
.await;
}
#[actix_rt::test]
async fn test_expect() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None);
let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
cfg,
services,
OnConnectData::default(),
None,
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
actix_rt::pin!(h1);
assert!(h1.as_mut().poll(cx).is_pending());
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
// polls: manual
assert_eq!(h1.poll_count, 1);
eprintln!("poll count: {}", h1.poll_count);
if let DispatcherState::Normal(ref inner) = h1.inner {
let io = inner.io.as_ref().unwrap();
let res = &io.write_buf()[..];
assert_eq!(
str::from_utf8(res).unwrap(),
"HTTP/1.1 100 Continue\r\n\r\n"
);
}
buf.extend_read_buf("12345");
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual manual shutdown
assert_eq!(h1.poll_count, 3);
if let DispatcherState::Normal(ref inner) = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
12345\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn test_eager_expect() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
cfg,
services,
OnConnectData::default(),
None,
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
actix_rt::pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal(ref inner) = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
// Despite the content-length header and even though the request payload has not
// been sent, this test expects a complete service response since the payload
// is not used at all. The service passed to dispatcher is path echo and doesn't
// consume payload bytes.
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 7\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
/upload\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn test_upgrade() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None);
let services =
HttpFlow::new(ok_service(), ExpectHandler, Some(UpgradeHandler));
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
cfg,
services,
OnConnectData::default(),
None,
);
buf.extend_read_buf(
"\
GET /ws HTTP/1.1\r\n\
Connection: Upgrade\r\n\
Upgrade: websocket\r\n\
\r\n\
",
);
actix_rt::pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Upgrade(_)));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
})
.await;
}
}
| read_available |
tests.rs | use crate::{Error, mock::*};
use frame_support::{assert_ok, assert_noop};
use super::*;
// Success case: creating a claim
#[test]
fn create_claim_works() {
new_test_ext(). |
#[test]
fn create_claim_failed_when_claim_already_exist() {
new_test_ext().execute_with(||{
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
assert_noop!(
PoeModule::create_claim(Origin::signed(1), claim.clone()),
Error::<Test>::ProofAlreadyExist
);
})
}
// Success case: revoking a claim
#[test]
fn revoke_claim_works() {
new_test_ext().execute_with(|| {
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
assert_ok!(PoeModule::revoke_claim(Origin::signed(1), claim.clone()));
assert_eq!(Proofs::<Test>::get(&claim), None);
})
}
// Failure case: revoking a claim that does not exist
#[test]
fn revoke_claim_failed_when_claim_is_not_exist() {
new_test_ext().execute_with(||{
let claim = vec![0, 1];
assert_noop!(
PoeModule::revoke_claim(Origin::signed(1), claim.clone()),
Error::<Test>::ClaimNotExist
);
})
}
// Failure case: revoking a claim when the caller is not the owner
#[test]
fn revoke_claim_failed_when_origin_is_not_owner() {
new_test_ext().execute_with(||{
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
assert_noop!(
PoeModule::revoke_claim(Origin::signed(2), claim.clone()),
Error::<Test>::NotProofOwner
);
})
}
// Success case: transferring a claim
#[test]
fn transfer_claim_works() {
new_test_ext().execute_with(||{
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
assert_ok!(PoeModule::transfer_claim(Origin::signed(1), claim.clone(), 2));
assert_eq!(
Proofs::<Test>::get(&claim),
Some((2, frame_system::Pallet::<Test>::block_number()))
);
assert_noop!(
PoeModule::revoke_claim(Origin::signed(1), claim.clone()),
Error::<Test>::NotProofOwner
);
})
}
// Failure case: transferring a claim that does not exist
#[test]
fn transfer_claim_failed_when_claim_is_not_exist() {
new_test_ext().execute_with(||{
let claim = vec![0, 1];
assert_noop!(
PoeModule::transfer_claim(Origin::signed(1), claim.clone(), 2),
Error::<Test>::ClaimNotExist
);
})
}
// Failure case: transferring a claim when the caller is not the owner
#[test]
fn transfer_claim_failed_when_origin_is_not_owner() {
new_test_ext().execute_with(||{
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
assert_noop!(
PoeModule::transfer_claim(Origin::signed(3), claim.clone(), 2),
Error::<Test>::NotProofOwner
);
})
}
// Failure case: creating a claim whose size exceeds the limit
#[test]
fn create_claim_failed_when_size_too_large() {
new_test_ext().execute_with(||{
let claim = vec![0; ClaimSize::get() + 1];
assert_noop!(
PoeModule::create_claim(Origin::signed(1), claim.clone()),
Error::<Test>::ClaimSizeTooLarge
);
});
} | execute_with(||{
let claim = vec![0; ClaimSize::get()];
assert_ok!(PoeModule::create_claim(Origin::signed(1), claim.clone()));
assert_eq!(
Proofs::<Test>::get(&claim),
Some((1,frame_system::Pallet::<Test>::block_number())
));
});
}
// Failure case: creating a claim that already exists
index.js | 'use strict';
var cache = require('../../lib/cache');
module.exports = function (router) {
router.get('/', function (req, res) {
cache.get('name', function(err, reply) {
var username;
      // TODO: switch to rendering a 503 page when the cache lookup fails
if (err) console.log(err);
if (reply) {
username = reply;
} else {
username = 'not found';
}
| username: username
});
});
});
}; | res.render('website/services', {
name: 'index', |
views.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::{format_err, Error, Result};
use libra_crypto::HashValue;
use libra_types::{
account_config::{
AccountResource, AccountRole, BalanceResource, BurnEvent, CancelBurnEvent,
CurrencyInfoResource, MintEvent, NewBlockEvent, NewEpochEvent, PreburnEvent,
ReceivedPaymentEvent, SentPaymentEvent, UpgradeEvent,
},
account_state_blob::AccountStateWithProof,
contract_event::ContractEvent,
epoch_change::EpochChangeProof,
ledger_info::LedgerInfoWithSignatures,
proof::{AccountStateProof, AccumulatorConsistencyProof},
transaction::{Transaction, TransactionArgument, TransactionPayload},
vm_error::StatusCode,
};
use move_core_types::{
identifier::Identifier,
language_storage::{StructTag, TypeTag},
move_resource::MoveResource,
};
use serde::{Deserialize, Serialize};
use std::{collections::BTreeMap, convert::TryFrom};
use transaction_builder::get_transaction_name;
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct AmountView {
pub amount: u64,
pub currency: String,
}
impl AmountView {
fn new(amount: u64, currency: &str) -> Self {
Self {
amount,
currency: currency.to_string(),
}
}
}
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub enum AccountRoleView {
#[serde(rename = "unknown")]
Unknown,
#[serde(rename = "unhosted")]
Unhosted,
#[serde(rename = "empty")]
Empty,
#[serde(rename = "child_vasp")]
ChildVASP { parent_vasp_address: BytesView },
#[serde(rename = "parent_vasp")]
ParentVASP {
human_name: String,
base_url: String,
expiration_time: u64,
compliance_key: BytesView,
},
}
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct AccountView {
pub balances: Vec<AmountView>,
pub sequence_number: u64,
pub authentication_key: BytesView,
pub sent_events_key: BytesView,
pub received_events_key: BytesView,
pub delegated_key_rotation_capability: bool,
pub delegated_withdrawal_capability: bool,
pub is_frozen: bool,
pub role_id: u64,
pub role: AccountRoleView,
}
impl AccountView {
pub fn new(
account: &AccountResource,
balances: BTreeMap<Identifier, BalanceResource>,
account_role: AccountRole,
) -> Self {
Self {
balances: balances
.iter()
.map(|(currency_code, balance)| {
                    AmountView::new(balance.coin(), &currency_code.as_str())
})
.collect(),
sequence_number: account.sequence_number(),
authentication_key: BytesView::from(account.authentication_key()),
sent_events_key: BytesView::from(account.sent_events().key().as_bytes()),
received_events_key: BytesView::from(account.received_events().key().as_bytes()),
delegated_key_rotation_capability: account.has_delegated_key_rotation_capability(),
delegated_withdrawal_capability: account.has_delegated_withdrawal_capability(),
is_frozen: account.is_frozen(),
role_id: account.role_id(),
role: AccountRoleView::from(account_role),
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct EventView {
pub key: BytesView,
pub sequence_number: u64,
pub transaction_version: u64,
pub data: EventDataView,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type")]
pub enum EventDataView {
#[serde(rename = "burn")]
Burn {
amount: AmountView,
preburn_address: BytesView,
},
#[serde(rename = "cancelburn")]
CancelBurn {
amount: AmountView,
preburn_address: BytesView,
},
#[serde(rename = "mint")]
Mint { amount: AmountView },
#[serde(rename = "preburn")]
Preburn {
amount: AmountView,
preburn_address: BytesView,
},
#[serde(rename = "receivedpayment")]
ReceivedPayment {
amount: AmountView,
sender: BytesView,
metadata: BytesView,
},
#[serde(rename = "sentpayment")]
SentPayment {
amount: AmountView,
receiver: BytesView,
metadata: BytesView,
},
#[serde(rename = "upgrade")]
Upgrade { write_set: BytesView },
#[serde(rename = "newepoch")]
NewEpoch { epoch: u64 },
#[serde(rename = "newblock")]
NewBlock {
round: u64,
proposer: BytesView,
proposed_time: u64,
},
#[serde(rename = "unknown")]
Unknown {},
}
impl From<(u64, ContractEvent)> for EventView {
    /// Converts a transaction version and contract event pair into an `EventView`.
fn from((txn_version, event): (u64, ContractEvent)) -> EventView {
let event_data = if event.type_tag() == &TypeTag::Struct(ReceivedPaymentEvent::struct_tag())
{
if let Ok(received_event) = ReceivedPaymentEvent::try_from(&event) {
let amount_view = AmountView::new(
received_event.amount(),
received_event.currency_code().as_str(),
);
Ok(EventDataView::ReceivedPayment {
amount: amount_view,
sender: BytesView::from(received_event.sender().as_ref()),
metadata: BytesView::from(received_event.metadata()),
})
} else {
Err(format_err!("Unable to parse ReceivedPaymentEvent"))
}
} else if event.type_tag() == &TypeTag::Struct(SentPaymentEvent::struct_tag()) {
if let Ok(sent_event) = SentPaymentEvent::try_from(&event) {
let amount_view =
AmountView::new(sent_event.amount(), sent_event.currency_code().as_str());
Ok(EventDataView::SentPayment {
amount: amount_view,
receiver: BytesView::from(sent_event.receiver().as_ref()),
metadata: BytesView::from(sent_event.metadata()),
})
} else {
Err(format_err!("Unable to parse SentPaymentEvent"))
}
} else if event.type_tag() == &TypeTag::Struct(BurnEvent::struct_tag()) {
if let Ok(burn_event) = BurnEvent::try_from(&event) {
let amount_view =
AmountView::new(burn_event.amount(), burn_event.currency_code().as_str());
let preburn_address = BytesView::from(burn_event.preburn_address().as_ref());
Ok(EventDataView::Burn {
amount: amount_view,
preburn_address,
})
} else {
Err(format_err!("Unable to parse BurnEvent"))
}
} else if event.type_tag() == &TypeTag::Struct(CancelBurnEvent::struct_tag()) | else if event.type_tag() == &TypeTag::Struct(MintEvent::struct_tag()) {
if let Ok(mint_event) = MintEvent::try_from(&event) {
let amount_view =
AmountView::new(mint_event.amount(), mint_event.currency_code().as_str());
Ok(EventDataView::Mint {
amount: amount_view,
})
} else {
Err(format_err!("Unable to parse MintEvent"))
}
} else if event.type_tag() == &TypeTag::Struct(PreburnEvent::struct_tag()) {
if let Ok(preburn_event) = PreburnEvent::try_from(&event) {
let amount_view = AmountView::new(
preburn_event.amount(),
preburn_event.currency_code().as_str(),
);
let preburn_address = BytesView::from(preburn_event.preburn_address().as_ref());
Ok(EventDataView::Preburn {
amount: amount_view,
preburn_address,
})
} else {
Err(format_err!("Unable to parse PreBurnEvent"))
}
} else if event.type_tag() == &TypeTag::Struct(NewBlockEvent::struct_tag()) {
if let Ok(new_block_event) = NewBlockEvent::try_from(&event) {
Ok(EventDataView::NewBlock {
proposer: BytesView::from(new_block_event.proposer().as_ref()),
round: new_block_event.round(),
proposed_time: new_block_event.proposed_time(),
})
} else {
Err(format_err!("Unable to parse NewBlockEvent"))
}
} else if event.type_tag() == &TypeTag::Struct(NewEpochEvent::struct_tag()) {
if let Ok(new_epoch_event) = NewEpochEvent::try_from(&event) {
Ok(EventDataView::NewEpoch {
epoch: new_epoch_event.epoch(),
})
} else {
Err(format_err!("Unable to parse NewEpochEvent"))
}
} else if event.type_tag() == &TypeTag::Struct(UpgradeEvent::struct_tag()) {
if let Ok(upgrade_event) = UpgradeEvent::try_from(&event) {
Ok(EventDataView::Upgrade {
write_set: BytesView::from(upgrade_event.write_set()),
})
} else {
Err(format_err!("Unable to parse UpgradeEvent"))
}
} else {
Err(format_err!("Unknown events"))
};
EventView {
key: BytesView::from(event.key().as_bytes()),
sequence_number: event.sequence_number(),
transaction_version: txn_version,
data: event_data.unwrap_or(EventDataView::Unknown {}),
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct BlockMetadata {
pub version: u64,
pub timestamp: u64,
}
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]
pub struct BytesView(pub String);
impl BytesView {
pub fn into_bytes(self) -> Result<Vec<u8>, Error> {
Ok(hex::decode(self.0)?)
}
}
impl From<&[u8]> for BytesView {
fn from(bytes: &[u8]) -> Self {
Self(hex::encode(bytes))
}
}
impl From<&Vec<u8>> for BytesView {
fn from(bytes: &Vec<u8>) -> Self {
Self(hex::encode(bytes))
}
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct TransactionView {
pub version: u64,
pub transaction: TransactionDataView,
pub hash: String,
pub events: Vec<EventView>,
pub vm_status: StatusCode,
pub gas_used: u64,
}
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
pub enum TransactionDataView {
#[serde(rename = "blockmetadata")]
BlockMetadata { timestamp_usecs: u64 },
#[serde(rename = "writeset")]
WriteSet {},
#[serde(rename = "user")]
UserTransaction {
sender: String,
signature_scheme: String,
signature: String,
public_key: String,
sequence_number: u64,
max_gas_amount: u64,
gas_unit_price: u64,
gas_currency: String,
expiration_time: u64,
script_hash: String,
script: ScriptView,
},
#[serde(rename = "unknown")]
UnknownTransaction {},
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(tag = "type")]
// TODO cover all script types
pub enum ScriptView {
#[serde(rename = "peer_to_peer_transaction")]
PeerToPeer {
receiver: String,
amount: u64,
currency: String,
metadata: BytesView,
metadata_signature: BytesView,
},
#[serde(rename = "mint_transaction")]
Mint {
receiver: String,
currency: String,
auth_key_prefix: BytesView,
amount: u64,
},
#[serde(rename = "unknown_transaction")]
Unknown {},
}
impl ScriptView {
// TODO cover all script types
pub fn get_name(&self) -> String {
match self {
ScriptView::PeerToPeer { .. } => "peer to peer transaction".to_string(),
ScriptView::Mint { .. } => "mint transaction".to_string(),
ScriptView::Unknown { .. } => "unknown transaction".to_string(),
}
}
}
impl From<Transaction> for TransactionDataView {
fn from(tx: Transaction) -> Self {
let x = match tx {
Transaction::BlockMetadata(t) => {
t.into_inner().map(|x| TransactionDataView::BlockMetadata {
timestamp_usecs: x.1,
})
}
Transaction::WaypointWriteSet(_) => Ok(TransactionDataView::WriteSet {}),
Transaction::UserTransaction(t) => {
let script_hash = match t.payload() {
TransactionPayload::Script(s) => HashValue::sha3_256_of(s.code()),
_ => HashValue::zero(),
}
.to_hex();
Ok(TransactionDataView::UserTransaction {
sender: t.sender().to_string(),
signature_scheme: t.authenticator().scheme().to_string(),
signature: hex::encode(t.authenticator().signature_bytes()),
public_key: hex::encode(t.authenticator().public_key_bytes()),
sequence_number: t.sequence_number(),
max_gas_amount: t.max_gas_amount(),
gas_unit_price: t.gas_unit_price(),
gas_currency: t.gas_currency_code().to_string(),
expiration_time: t.expiration_time().as_secs(),
script_hash,
script: t.into_raw_transaction().into_payload().into(),
})
}
};
x.unwrap_or(TransactionDataView::UnknownTransaction {})
}
}
impl From<AccountRole> for AccountRoleView {
fn from(role: AccountRole) -> Self {
match role {
AccountRole::Unhosted => AccountRoleView::Unhosted,
AccountRole::Unknown => AccountRoleView::Unknown,
AccountRole::ChildVASP(child_vasp) => AccountRoleView::ChildVASP {
parent_vasp_address: BytesView::from(&child_vasp.parent_vasp_addr().to_vec()),
},
AccountRole::ParentVASP(parent_vasp) => AccountRoleView::ParentVASP {
human_name: parent_vasp.human_name().to_string(),
base_url: parent_vasp.base_url().to_string(),
expiration_time: parent_vasp.expiration_date(),
compliance_key: BytesView::from(parent_vasp.compliance_public_key()),
},
}
}
}
impl From<TransactionPayload> for ScriptView {
fn from(value: TransactionPayload) -> Self {
let empty_vec: Vec<TransactionArgument> = vec![];
let empty_ty_vec: Vec<String> = vec![];
let unknown_currency = "unknown_currency".to_string();
let (code, args, ty_args) = match value {
TransactionPayload::WriteSet(_) => ("genesis".to_string(), empty_vec, empty_ty_vec),
TransactionPayload::Script(script) => (
get_transaction_name(script.code()),
script.args().to_vec(),
script
.ty_args()
.iter()
.map(|type_tag| match type_tag {
TypeTag::Struct(StructTag { module, .. }) => module.to_string(),
tag => format!("{}", tag),
})
.collect(),
),
TransactionPayload::Module(_) => {
("module publishing".to_string(), empty_vec, empty_ty_vec)
}
};
let res = match code.as_str() {
"peer_to_peer_with_metadata_transaction" => {
if let [TransactionArgument::Address(receiver), TransactionArgument::U64(amount), TransactionArgument::U8Vector(metadata), TransactionArgument::U8Vector(metadata_signature)] =
&args[..]
{
Ok(ScriptView::PeerToPeer {
receiver: receiver.to_string(),
amount: *amount,
currency: ty_args.get(0).unwrap_or(&unknown_currency).to_string(),
metadata: BytesView::from(metadata),
metadata_signature: BytesView::from(metadata_signature),
})
} else {
Err(format_err!("Unable to parse PeerToPeer arguments"))
}
}
"mint" => {
if let [TransactionArgument::Address(receiver), TransactionArgument::U8Vector(auth_key_prefix), TransactionArgument::U64(amount)] =
&args[..]
{
let currency = ty_args.get(0).unwrap_or(&unknown_currency).to_string();
Ok(ScriptView::Mint {
receiver: receiver.to_string(),
auth_key_prefix: BytesView::from(auth_key_prefix),
amount: *amount,
currency,
})
} else {
Err(format_err!("Unable to parse PeerToPeer arguments"))
}
}
_ => Err(format_err!("Unknown scripts")),
};
res.unwrap_or(ScriptView::Unknown {})
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct CurrencyInfoView {
pub code: String,
pub scaling_factor: u64,
pub fractional_part: u64,
}
impl From<CurrencyInfoResource> for CurrencyInfoView {
fn from(info: CurrencyInfoResource) -> CurrencyInfoView {
CurrencyInfoView {
code: info.currency_code().to_string(),
scaling_factor: info.scaling_factor(),
fractional_part: info.fractional_part(),
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct StateProofView {
pub ledger_info_with_signatures: BytesView,
pub epoch_change_proof: BytesView,
pub ledger_consistency_proof: BytesView,
}
impl
TryFrom<(
LedgerInfoWithSignatures,
EpochChangeProof,
AccumulatorConsistencyProof,
)> for StateProofView
{
type Error = Error;
fn try_from(
(ledger_info_with_signatures, epoch_change_proof, ledger_consistency_proof): (
LedgerInfoWithSignatures,
EpochChangeProof,
AccumulatorConsistencyProof,
),
) -> Result<StateProofView, Self::Error> {
Ok(StateProofView {
ledger_info_with_signatures: BytesView::from(&lcs::to_bytes(
&ledger_info_with_signatures,
)?),
epoch_change_proof: BytesView::from(&lcs::to_bytes(&epoch_change_proof)?),
ledger_consistency_proof: BytesView::from(&lcs::to_bytes(&ledger_consistency_proof)?),
})
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct AccountStateWithProofView {
pub version: u64,
pub blob: Option<BytesView>,
pub proof: AccountStateProofView,
}
impl TryFrom<AccountStateWithProof> for AccountStateWithProofView {
type Error = Error;
fn try_from(
account_state_with_proof: AccountStateWithProof,
) -> Result<AccountStateWithProofView, Error> {
let blob = if let Some(account_blob) = account_state_with_proof.blob {
Some(BytesView::from(&lcs::to_bytes(&account_blob)?))
} else {
None
};
Ok(AccountStateWithProofView {
version: account_state_with_proof.version,
blob,
proof: AccountStateProofView::try_from(account_state_with_proof.proof)?,
})
}
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct AccountStateProofView {
pub ledger_info_to_transaction_info_proof: BytesView,
pub transaction_info: BytesView,
pub transaction_info_to_account_proof: BytesView,
}
impl TryFrom<AccountStateProof> for AccountStateProofView {
type Error = Error;
fn try_from(account_state_proof: AccountStateProof) -> Result<AccountStateProofView, Error> {
Ok(AccountStateProofView {
ledger_info_to_transaction_info_proof: BytesView::from(&lcs::to_bytes(
account_state_proof
.transaction_info_with_proof()
.ledger_info_to_transaction_info_proof(),
)?),
transaction_info: BytesView::from(&lcs::to_bytes(
account_state_proof
.transaction_info_with_proof()
.transaction_info(),
)?),
transaction_info_to_account_proof: BytesView::from(&lcs::to_bytes(
account_state_proof.transaction_info_to_account_proof(),
)?),
})
}
}
| {
if let Ok(cancel_burn_event) = CancelBurnEvent::try_from(&event) {
let amount_view = AmountView::new(
cancel_burn_event.amount(),
cancel_burn_event.currency_code().as_str(),
);
let preburn_address = BytesView::from(cancel_burn_event.preburn_address().as_ref());
Ok(EventDataView::CancelBurn {
amount: amount_view,
preburn_address,
})
} else {
Err(format_err!("Unable to parse CancelBurnEvent"))
}
} |
issue-17732.rs | // compile-pass
#![allow(dead_code)]
// pretty-expanded FIXME #23616
trait Person {
type string;
fn | (&self) { }
}
struct Someone<P: Person>(std::marker::PhantomData<P>);
fn main() {}
| dummy |
fstring.rs | use std::iter;
use std::mem;
use std::str;
use crate::ast::{ConversionFlag, StringGroup};
use crate::error::{FStringError, FStringErrorType};
use crate::location::Location;
use crate::parser::parse_expression;
use self::FStringErrorType::*;
use self::StringGroup::*;
struct FStringParser<'a> {
chars: iter::Peekable<str::Chars<'a>>,
}
impl<'a> FStringParser<'a> {
fn new(source: &'a str) -> Self {
Self {
chars: source.chars().peekable(),
}
}
fn parse_formatted_value(&mut self) -> Result<StringGroup, FStringErrorType> {
let mut expression = String::new();
let mut spec = None;
let mut delims = Vec::new();
let mut conversion = None;
while let Some(ch) = self.chars.next() { | return Err(EmptyExpression);
}
conversion = Some(match self.chars.next() {
Some('s') => ConversionFlag::Str,
Some('a') => ConversionFlag::Ascii,
Some('r') => ConversionFlag::Repr,
Some(_) => {
return Err(InvalidConversionFlag);
}
None => {
return Err(ExpectedRbrace);
}
});
if self.chars.peek() != Some(&'}') {
return Err(ExpectedRbrace);
}
}
':' if delims.is_empty() => {
let mut nested = false;
let mut in_nested = false;
let mut spec_expression = String::new();
while let Some(&next) = self.chars.peek() {
match next {
'{' => {
if in_nested {
return Err(ExpressionNestedTooDeeply);
}
in_nested = true;
nested = true;
self.chars.next();
continue;
}
'}' => {
if in_nested {
in_nested = false;
self.chars.next();
}
break;
}
_ => (),
}
spec_expression.push(next);
self.chars.next();
}
if in_nested {
return Err(UnclosedLbrace);
}
if nested {
spec = Some(Box::new(FormattedValue {
value: Box::new(
parse_expression(spec_expression.trim())
.map_err(|e| InvalidExpression(Box::new(e.error)))?,
),
conversion: None,
spec: None,
}))
} else {
spec = Some(Box::new(Constant {
value: spec_expression.to_owned(),
}))
}
}
'(' | '{' | '[' => {
expression.push(ch);
delims.push(ch);
}
')' => {
if delims.pop() != Some('(') {
return Err(MismatchedDelimiter);
}
expression.push(ch);
}
']' => {
if delims.pop() != Some('[') {
return Err(MismatchedDelimiter);
}
expression.push(ch);
}
'}' if !delims.is_empty() => {
if delims.pop() != Some('{') {
return Err(MismatchedDelimiter);
}
expression.push(ch);
}
'}' => {
if expression.is_empty() {
return Err(EmptyExpression);
}
return Ok(FormattedValue {
value: Box::new(
parse_expression(expression.trim())
.map_err(|e| InvalidExpression(Box::new(e.error)))?,
),
conversion,
spec,
});
}
'"' | '\'' => {
expression.push(ch);
while let Some(next) = self.chars.next() {
expression.push(next);
if next == ch {
break;
}
}
}
_ => {
expression.push(ch);
}
}
}
Err(UnclosedLbrace)
}
fn parse(mut self) -> Result<StringGroup, FStringErrorType> {
let mut content = String::new();
let mut values = vec![];
while let Some(ch) = self.chars.next() {
match ch {
'{' => {
if let Some('{') = self.chars.peek() {
self.chars.next();
content.push('{');
} else {
if !content.is_empty() {
values.push(Constant {
value: mem::replace(&mut content, String::new()),
});
}
values.push(self.parse_formatted_value()?);
}
}
'}' => {
if let Some('}') = self.chars.peek() {
self.chars.next();
content.push('}');
} else {
return Err(UnopenedRbrace);
}
}
_ => {
content.push(ch);
}
}
}
if !content.is_empty() {
values.push(Constant { value: content })
}
Ok(match values.len() {
0 => Constant {
value: String::new(),
},
1 => values.into_iter().next().unwrap(),
_ => Joined { values },
})
}
}
/// Parse an f-string into a string group.
fn parse_fstring(source: &str) -> Result<StringGroup, FStringErrorType> {
FStringParser::new(source).parse()
}
/// Parse an fstring from a string located at a certain position in the source code.
/// In case of errors, the error is returned together with its location.
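///
/// For example (illustrative): `parse_located_fstring("{x}", Location::new(1, 1))`
/// returns `Ok(..)` wrapping a `FormattedValue` for the identifier `x`, while
/// `parse_located_fstring("{", loc)` returns
/// `Err(FStringError { error: UnclosedLbrace, location: loc })`.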
pub fn parse_located_fstring(
source: &str,
location: Location,
) -> Result<StringGroup, FStringError> {
parse_fstring(source).map_err(|error| FStringError { error, location })
}
#[cfg(test)]
mod tests {
use crate::ast;
use super::*;
fn mk_ident(name: &str, row: usize, col: usize) -> ast::Expression {
ast::Expression {
location: ast::Location::new(row, col),
node: ast::ExpressionType::Identifier {
name: name.to_owned(),
},
}
}
#[test]
fn test_parse_fstring() {
let source = String::from("{a}{ b }{{foo}}");
let parse_ast = parse_fstring(&source).unwrap();
assert_eq!(
parse_ast,
Joined {
values: vec![
FormattedValue {
value: Box::new(mk_ident("a", 1, 1)),
conversion: None,
spec: None,
},
FormattedValue {
value: Box::new(mk_ident("b", 1, 1)),
conversion: None,
spec: None,
},
Constant {
value: "{foo}".to_owned()
}
]
}
);
}
#[test]
fn test_parse_fstring_nested_spec() {
let source = String::from("{foo:{spec}}");
let parse_ast = parse_fstring(&source).unwrap();
assert_eq!(
parse_ast,
FormattedValue {
value: Box::new(mk_ident("foo", 1, 1)),
conversion: None,
spec: Some(Box::new(FormattedValue {
value: Box::new(mk_ident("spec", 1, 1)),
conversion: None,
spec: None,
})),
}
);
}
#[test]
fn test_parse_fstring_not_nested_spec() {
let source = String::from("{foo:spec}");
let parse_ast = parse_fstring(&source).unwrap();
assert_eq!(
parse_ast,
FormattedValue {
value: Box::new(mk_ident("foo", 1, 1)),
conversion: None,
spec: Some(Box::new(Constant {
value: "spec".to_owned(),
})),
}
);
}
#[test]
fn test_parse_empty_fstring() {
assert_eq!(
parse_fstring(""),
Ok(Constant {
value: String::new(),
}),
);
}
#[test]
fn test_parse_invalid_fstring() {
assert_eq!(parse_fstring("{5!a"), Err(ExpectedRbrace));
assert_eq!(parse_fstring("{5!a1}"), Err(ExpectedRbrace));
assert_eq!(parse_fstring("{5!"), Err(ExpectedRbrace));
assert_eq!(parse_fstring("abc{!a 'cat'}"), Err(EmptyExpression));
assert_eq!(parse_fstring("{!a"), Err(EmptyExpression));
assert_eq!(parse_fstring("{ !a}"), Err(EmptyExpression));
assert_eq!(parse_fstring("{5!}"), Err(InvalidConversionFlag));
assert_eq!(parse_fstring("{5!x}"), Err(InvalidConversionFlag));
assert_eq!(parse_fstring("{a:{a:{b}}"), Err(ExpressionNestedTooDeeply));
assert_eq!(parse_fstring("{a:b}}"), Err(UnopenedRbrace));
assert_eq!(parse_fstring("}"), Err(UnopenedRbrace));
assert_eq!(parse_fstring("{a:{b}"), Err(UnclosedLbrace));
assert_eq!(parse_fstring("{"), Err(UnclosedLbrace));
// TODO: check for InvalidExpression enum?
assert!(parse_fstring("{class}").is_err());
}
#[test]
fn test_parse_fstring_not_equals() {
let source = String::from("{1 != 2}");
let parse_ast = parse_fstring(&source);
assert!(parse_ast.is_ok());
}
} | match ch {
'!' if delims.is_empty() && self.chars.peek() != Some(&'=') => {
if expression.trim().is_empty() { |
input.rs | #![allow(dead_code)]
use sdl::event::Key;
pub struct Input {
keys: [bool; 16]
}
impl Input {
pub fn new() -> Input {
Input { keys: [false; 16] }
}
pub fn pressed(&mut self, index: usize) -> bool {
self.keys[index]
}
pub fn press(&mut self, key: Key, state: bool) {
match key {
Key::Num1 => self.set_key(0x1, state),
Key::Num2 => self.set_key(0x2, state),
Key::Num3 => self.set_key(0x3, state),
Key::Num4 => self.set_key(0xc, state),
Key::Q => self.set_key(0x4, state),
Key::W => self.set_key(0x5, state),
Key::E => self.set_key(0x6, state),
Key::R => self.set_key(0xd, state),
Key::A => self.set_key(0x7, state),
Key::S => self.set_key(0x8, state),
Key::D => self.set_key(0x9, state),
Key::F => self.set_key(0xe, state),
Key::Z => self.set_key(0xa, state),
Key::X => self.set_key(0x0, state),
Key::C => self.set_key(0xb, state),
Key::V => self.set_key(0xf, state), | }
fn set_key(&mut self, index: usize, state: bool) {
self.keys[index] = state;
}
} | _ => ()
} |
delete_scdn_domain.go | package scdn
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// DeleteScdnDomain invokes the scdn.DeleteScdnDomain API synchronously
func (client *Client) DeleteScdnDomain(request *DeleteScdnDomainRequest) (response *DeleteScdnDomainResponse, err error) {
response = CreateDeleteScdnDomainResponse()
err = client.DoAction(request, response)
return
}
// DeleteScdnDomainWithChan invokes the scdn.DeleteScdnDomain API asynchronously
func (client *Client) DeleteScdnDomainWithChan(request *DeleteScdnDomainRequest) (<-chan *DeleteScdnDomainResponse, <-chan error) {
responseChan := make(chan *DeleteScdnDomainResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.DeleteScdnDomain(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// DeleteScdnDomainWithCallback invokes the scdn.DeleteScdnDomain API asynchronously
func (client *Client) DeleteScdnDomainWithCallback(request *DeleteScdnDomainRequest, callback func(response *DeleteScdnDomainResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *DeleteScdnDomainResponse
var err error
defer close(result)
response, err = client.DeleteScdnDomain(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// DeleteScdnDomainRequest is the request struct for api DeleteScdnDomain
type DeleteScdnDomainRequest struct {
*requests.RpcRequest
OwnerAccount string `position:"Query" name:"OwnerAccount"`
DomainName string `position:"Query" name:"DomainName"`
OwnerId requests.Integer `position:"Query" name:"OwnerId"`
ResourceGroupId string `position:"Query" name:"ResourceGroupId"`
SecurityToken string `position:"Query" name:"SecurityToken"`
}
// DeleteScdnDomainResponse is the response struct for api DeleteScdnDomain
type DeleteScdnDomainResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
}
// CreateDeleteScdnDomainRequest creates a request to invoke DeleteScdnDomain API
func | () (request *DeleteScdnDomainRequest) {
request = &DeleteScdnDomainRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("scdn", "2017-11-15", "DeleteScdnDomain", "", "")
request.Method = requests.POST
return
}
// CreateDeleteScdnDomainResponse creates a response to parse from DeleteScdnDomain response
func CreateDeleteScdnDomainResponse() (response *DeleteScdnDomainResponse) {
response = &DeleteScdnDomainResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| CreateDeleteScdnDomainRequest |
main.rs | #![deny(warnings)]
#![cfg_attr(target_os = "cuda", feature(abi_ptx))]
#![cfg_attr(target_os = "cuda", no_std)]
#[no_mangle]
#[cfg(target_os = "cuda")]
pub unsafe extern "ptx-kernel" fn example_kernel(a: i32, b: i32) {
use ptx_support::prelude::*;
if Context::thread().index() == (1, 0, 0) {
assert_eq!(a, b);
}
}
#[cfg(not(target_os = "cuda"))]
fn | () {
use cuda::driver;
use cuda::driver::{Any, Block, Device, Grid};
use std::ffi::CString;
driver::initialize().expect("Unable to initialize CUDA");
let ptx_assembly =
CString::new(include_str!(env!("KERNEL_PTX_PATH"))).expect("Unable to create sources");
let kernel_name = CString::new("example_kernel").expect("Unable to create kernel name string");
let context = {
Device(0)
.expect("Unable to get CUDA device 0")
.create_context()
.expect("Unable to create CUDA context")
};
let module = {
context
.load_module(&ptx_assembly)
.expect("Unable to create module")
};
let kernel = {
module
.function(&kernel_name)
.expect("Unable to find the kernel")
};
println!("You should now see a panic right from the kernel:");
kernel
.launch(&[Any(&10i32), Any(&0i32)], Grid::x(2), Block::x(8))
.unwrap_err();
}
| main |
validate_spec_test.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import mock
import pytest
from .validate_spec_url_test import make_mock_responses
from .validate_spec_url_test import read_contents
from swagger_spec_validator.common import get_uri_from_file_path
from swagger_spec_validator.common import SwaggerValidationError
from swagger_spec_validator.validator12 import validate_data_type
from swagger_spec_validator.validator12 import validate_model
from swagger_spec_validator.validator12 import validate_parameter
from swagger_spec_validator.validator12 import validate_spec
RESOURCE_LISTING_FILE = os.path.abspath('tests/data/v1.2/foo/swagger_api.json')
API_DECLARATION_FILE = os.path.abspath('tests/data/v1.2/foo/foo.json')
def get_resource_listing():
return read_contents(RESOURCE_LISTING_FILE)
def test_http_success():
mock_responses = make_mock_responses([API_DECLARATION_FILE])
with mock.patch(
'swagger_spec_validator.validator12.read_url',
side_effect=mock_responses
) as mock_read_url:
validate_spec(get_resource_listing(), 'http://localhost/api-docs')
mock_read_url.assert_called_once_with('http://localhost/api-docs/foo')
def | ():
mock_string = 'swagger_spec_validator.validator12.validate_api_declaration'
with mock.patch(mock_string) as mock_api:
validate_spec(
get_resource_listing(),
get_uri_from_file_path(RESOURCE_LISTING_FILE),
)
expected = read_contents(API_DECLARATION_FILE)
mock_api.assert_called_once_with(expected)
def test_validate_parameter_type_file_in_form():
parameter = {
'paramType': 'form',
'name': 'what',
'type': 'File',
}
# lack of errors is success
validate_parameter(parameter, [])
def test_validate_parameter_type_file_in_body():
parameter = {
'paramType': 'body',
'name': 'what',
'type': 'File',
}
with pytest.raises(SwaggerValidationError, match='Type "File" is only valid for form parameters'):
validate_parameter(parameter, [])
def test_validate_data_type_is_model():
model_id = 'MyModelId'
model_ids = [model_id, 'OtherModelId']
obj = {'type': model_id}
# lack of error is success
validate_data_type(obj, model_ids, allow_refs=False)
def test_validate_model_matches_id():
model = {"id": "mysupermodel"}
model_name = "mymodel"
model_ids = ""
with pytest.raises(SwaggerValidationError, match='model name: mymodel does not match model id: mysupermodel'):
validate_model(model, model_name, model_ids)
| test_file_uri_success |
preamble.rs	| use aorist_primitives::{AString, AVec};
use crate::code::Preamble;
#[cfg(feature = "python")]
use crate::python::PythonPreamble;
use crate::r::r_import::RImport;
use extendr_api::prelude::*;
use std::hash::Hash;
#[derive(Clone, PartialEq, Hash, Eq)]
pub struct RPreamble {
pub libraries: AVec<RImport>,
pub body: AString,
}
impl Preamble for RPreamble {
type ImportType = RImport;
fn get_imports(&self) -> AVec<Self::ImportType> {
self.libraries.clone()
}
}
impl<'a> RPreamble {
// Assumes R has already been started
pub fn | (body: AString) -> RPreamble {
eval_string(
r#"
to.preamble <- function(body) {
x <- as.list(parse(text=body))
is.library <- sapply(x, function(y) {
if (class(y) == "call") {
return(y[[1]] == "library")
}
return(FALSE)
})
call.idx <- which(is.library)
calls <- x[call.idx]
not.calls <- x[which(!is.library)]
body <- paste(sapply(
not.calls,
function(x) paste(deparse(x), collapse="\n")
), collapse="\n\n")
libraries <- sapply(calls, function(x) x[[2]])
list(body=body, libraries=libraries)
}
"#,
)
.unwrap();
let res = call!("to.preamble", body).unwrap();
let body_no_imports = res.index(1).unwrap();
let libraries = res.index(2).unwrap();
Self {
libraries: match libraries.as_string_vector() {
Some(v) => v.into_iter().map(|x| RImport::new(x)).collect(),
None => AVec::new(),
},
body: body_no_imports.as_str().unwrap().to_string(),
}
}
#[cfg(feature = "python")]
pub fn from_python(var_name: AString, body: AString) -> RPreamble {
let python_preamble = PythonPreamble::new(body);
let formatted = python_preamble.to_string().replace("'", "\\'");
Self {
libraries: vec![RImport::new("reticulate".into())],
body: format!("{} <- '\n{}'", var_name, formatted).to_string(),
}
}
pub fn get_body(&self) -> String {
self.body.clone()
}
}
#[allow(unused_imports)]
mod r_test_preamble {
use crate::r::preamble::RPreamble;
use extendr_api::prelude::*;
#[test]
fn test_basic_preamble() {
test! {
let body = r#"
library('ggplot2')
library('igraph')
c(1)
f <- function(a, b) {
a + b
}
"#;
let preamble = RPreamble::new(body.to_string());
assert_eq!(preamble.libraries.get(0).unwrap().library, "ggplot2");
assert_eq!(preamble.libraries.get(1).unwrap().library, "igraph");
assert_eq!(preamble.body, r#"c(1)
f <- function(a, b) {
a + b
}"#);
}
}
}
| new |
package.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Shapelib(CMakePackage):
| """The Shapefile C Library provides the ability to write simple C programs
for reading, writing and updating (to a limited extent) ESRI Shapefiles,
and the associated attribute file (.dbf).
"""
homepage = "http://shapelib.maptools.org/"
url = "https://github.com/OSGeo/shapelib/archive/v1.5.0.tar.gz"
version('1.5.0', sha256='48de3a6a8691b0b111b909c0b908af4627635c75322b3a501c0c0885f3558cad') |
|
body_dump.go | package middleware
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"net"
"net/http"
"github.com/linthan/echo/v4"
)
type (
// BodyDumpConfig defines the config for BodyDump middleware.
BodyDumpConfig struct {
// Skipper defines a function to skip middleware.
Skipper Skipper
// Handler receives request and response payload.
// Required.
Handler BodyDumpHandler
}
// BodyDumpHandler receives the request and response payload.
BodyDumpHandler func(echo.Context, []byte, []byte)
bodyDumpResponseWriter struct {
io.Writer
http.ResponseWriter
}
)
var (
// DefaultBodyDumpConfig is the default BodyDump middleware config.
DefaultBodyDumpConfig = BodyDumpConfig{
Skipper: DefaultSkipper,
}
)
// BodyDump returns a BodyDump middleware.
//
// BodyDump middleware captures the request and response payload and calls the
// registered handler.
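//
// A minimal usage sketch (the handler body is illustrative):
//
//	e := echo.New()
//	e.Use(BodyDump(func(c echo.Context, reqBody, resBody []byte) {
//		// inspect or log the captured request/response payloads here
//	}))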
func BodyDump(handler BodyDumpHandler) echo.MiddlewareFunc |
// BodyDumpWithConfig returns a BodyDump middleware with config.
// See: `BodyDump()`.
func BodyDumpWithConfig(config BodyDumpConfig) echo.MiddlewareFunc {
// Defaults
if config.Handler == nil {
panic("echo: body-dump middleware requires a handler function")
}
if config.Skipper == nil {
config.Skipper = DefaultBodyDumpConfig.Skipper
}
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) (err error) {
if config.Skipper(c) {
return next(c)
}
// Request
reqBody := []byte{}
if c.Request().Body != nil { // Read
reqBody, _ = ioutil.ReadAll(c.Request().Body)
}
c.Request().Body = ioutil.NopCloser(bytes.NewBuffer(reqBody)) // Reset
// Response
resBody := new(bytes.Buffer)
mw := io.MultiWriter(c.Response().Writer, resBody)
writer := &bodyDumpResponseWriter{Writer: mw, ResponseWriter: c.Response().Writer}
c.Response().Writer = writer
if err = next(c); err != nil {
c.Error(err)
}
// Callback
config.Handler(c, reqBody, resBody.Bytes())
return
}
}
}
func (w *bodyDumpResponseWriter) WriteHeader(code int) {
w.ResponseWriter.WriteHeader(code)
}
func (w *bodyDumpResponseWriter) Write(b []byte) (int, error) {
return w.Writer.Write(b)
}
func (w *bodyDumpResponseWriter) Flush() {
w.ResponseWriter.(http.Flusher).Flush()
}
func (w *bodyDumpResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return w.ResponseWriter.(http.Hijacker).Hijack()
}
| {
c := DefaultBodyDumpConfig
c.Handler = handler
return BodyDumpWithConfig(c)
} |
index.config.js | (function() {
'use strict';
angular
.module('blogapp')
.config(config);
config.$inject = ['$logProvider'];
function config($logProvider) {
$logProvider.debugEnabled(true);
} | })(); | |
http_access.go | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httpaccess
import (
"bufio"
"net"
"net/http"
"time"
"github.com/sirupsen/logrus"
"github.com/newswarm-lab/new-bee/pkg/logging"
"github.com/newswarm-lab/new-bee/pkg/tracing"
)
// NewHTTPAccessLogHandler creates a handler that will log a message after a
// request has been served.
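//
// A minimal usage sketch (logger, tracer and mux are assumed to be constructed
// elsewhere; names are illustrative):
//
//	accessLog := NewHTTPAccessLogHandler(logger, logrus.InfoLevel, tracer, "api access")
//	server := &http.Server{Handler: accessLog(mux)}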
func NewHTTPAccessLogHandler(logger logging.Logger, level logrus.Level, tracer *tracing.Tracer, message string) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
rl := &responseLogger{w, 0, 0, level}
h.ServeHTTP(rl, r)
if rl.level == 0 {
return
}
ctx, _ := tracer.WithContextFromHTTPHeaders(r.Context(), r.Header)
logger := tracing.NewLoggerWithTraceID(ctx, logger)
status := rl.status
if status == 0 {
status = http.StatusOK
}
ip, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
ip = r.RemoteAddr
}
fields := logrus.Fields{
"ip": ip,
"method": r.Method,
"uri": r.RequestURI,
"proto": r.Proto,
"status": status,
"size": rl.size,
"duration": time.Since(startTime).Seconds(),
}
if v := r.Referer(); v != "" {
fields["referrer"] = v
}
if v := r.UserAgent(); v != "" {
fields["user-agent"] = v
}
if v := r.Header.Get("X-Forwarded-For"); v != "" {
fields["x-forwarded-for"] = v
}
if v := r.Header.Get("X-Real-Ip"); v != "" {
fields["x-real-ip"] = v
}
logger.WithFields(fields).Log(rl.level, message)
})
}
}
// SetAccessLogLevelHandler overrides the log level set in
// NewHTTPAccessLogHandler for a specific endpoint. Use log level 0 to suppress
// log messages.
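//
// A usage sketch (the mux and healthHandler values are assumptions):
//
//	mux.Handle("/health", SetAccessLogLevelHandler(0)(healthHandler))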
func SetAccessLogLevelHandler(level logrus.Level) func(h http.Handler) http.Handler |
type responseLogger struct {
w http.ResponseWriter
status int
size int
level logrus.Level
}
func (l *responseLogger) Header() http.Header {
return l.w.Header()
}
func (l *responseLogger) Flush() {
l.w.(http.Flusher).Flush()
}
func (l *responseLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return l.w.(http.Hijacker).Hijack()
}
func (l *responseLogger) CloseNotify() <-chan bool {
// staticcheck SA1019 CloseNotifier interface is required by gorilla compress handler
// nolint:staticcheck
return l.w.(http.CloseNotifier).CloseNotify() // skipcq: SCC-SA1019
}
func (l *responseLogger) Push(target string, opts *http.PushOptions) error {
return l.w.(http.Pusher).Push(target, opts)
}
func (l *responseLogger) Write(b []byte) (int, error) {
size, err := l.w.Write(b)
l.size += size
return size, err
}
func (l *responseLogger) WriteHeader(s int) {
l.w.WriteHeader(s)
if l.status == 0 {
l.status = s
}
}
| {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if rl, ok := w.(*responseLogger); ok {
rl.level = level
}
h.ServeHTTP(w, r)
})
}
} |
lib.rs | #![no_std]
use embedded_hal::blocking::delay::DelayUs;
use embedded_hal::digital::v2::OutputPin;
// Inspired by
// - https://github.com/polyfloyd/ledcat/blob/master/src/device/hub75.rs
// - https://github.com/mmou/led-marquee/blob/8c88531a6938edff6db829ca21c15304515874ea/src/hub.rs
// - https://github.com/adafruit/RGB-matrix-Panel/blob/master/RGBmatrixPanel.cpp
// - https://www.mikrocontroller.net/topic/452187 (sorry, german only)
/// # Theory of Operation
/// This display is essentially split in half, with the top 16 rows being
/// controlled by one set of shift registers (r1, g1, b1) and the bottom 16
/// rows by another set (r2, g2, b2). So, the best way to update it is to
/// show one of the bottom and top rows in tandem. The row (between 0-15) is then
/// selected by the A, B, C, D pins, which are just, as one might expect, the bits 0 to 3.
///
/// The display doesn't really do brightness, so we have to do it ourselves, by
/// rendering the same frame multiple times, with some pixels being turned off if
/// they are darker (PWM).
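///
/// For example (a sketch of the thresholding done in `output`, assuming the
/// step math from `new`): with 3 brightness bits the frame is rendered 7 times
/// per call against thresholds 32, 64, ..., 224, so a pixel with red value 96
/// is lit on the first three passes only.
///
/// ```ignore
/// let brightness_bits = 3u8;
/// let step = 1u8 << (8 - brightness_bits); // 32
/// let count = ((1u16 << brightness_bits) - 1) as u8; // 7 passes
/// let red = 96u8;
/// let lit = (1..=count)
///     .filter(|&pass| red >= pass.saturating_mul(step))
///     .count(); // 3 of 7 passes, i.e. ~43% duty cycle
/// ```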
pub struct Hub75<PINS> {
// r1, g1, b1, r2, g2, b2, column, row
data: [[(u8, u8, u8, u8, u8, u8); 64]; 16],
brightness_step: u8,
brightness_count: u8,
pins: PINS,
}
/// A trait, so that it's easier to reason about the pins
/// Implemented for a tuple `(r1, g1, b1, r2, g2, b2, a, b, c, d, clk, lat, oe)`
/// with every element implementing `OutputPin`
pub trait Outputs {
type Error;
type R1: OutputPin<Error = Self::Error>;
type G1: OutputPin<Error = Self::Error>;
type B1: OutputPin<Error = Self::Error>;
type R2: OutputPin<Error = Self::Error>;
type G2: OutputPin<Error = Self::Error>;
type B2: OutputPin<Error = Self::Error>;
type A: OutputPin<Error = Self::Error>;
type B: OutputPin<Error = Self::Error>;
type C: OutputPin<Error = Self::Error>;
type D: OutputPin<Error = Self::Error>;
type CLK: OutputPin<Error = Self::Error>;
type LAT: OutputPin<Error = Self::Error>;
type OE: OutputPin<Error = Self::Error>;
fn r1(&mut self) -> &mut Self::R1;
fn g1(&mut self) -> &mut Self::G1;
fn b1(&mut self) -> &mut Self::B1;
fn r2(&mut self) -> &mut Self::R2;
fn g2(&mut self) -> &mut Self::G2;
fn b2(&mut self) -> &mut Self::B2;
fn a(&mut self) -> &mut Self::A;
fn b(&mut self) -> &mut Self::B;
fn c(&mut self) -> &mut Self::C;
fn d(&mut self) -> &mut Self::D;
fn clk(&mut self) -> &mut Self::CLK;
fn lat(&mut self) -> &mut Self::LAT;
fn oe(&mut self) -> &mut Self::OE;
}
impl<
E,
R1: OutputPin<Error = E>,
G1: OutputPin<Error = E>,
B1: OutputPin<Error = E>,
R2: OutputPin<Error = E>,
G2: OutputPin<Error = E>,
B2: OutputPin<Error = E>,
A: OutputPin<Error = E>,
B: OutputPin<Error = E>,
C: OutputPin<Error = E>,
D: OutputPin<Error = E>,
CLK: OutputPin<Error = E>,
LAT: OutputPin<Error = E>,
OE: OutputPin<Error = E>,
> Outputs for (R1, G1, B1, R2, G2, B2, A, B, C, D, CLK, LAT, OE)
{
type Error = E;
type R1 = R1;
type G1 = G1;
type B1 = B1;
type R2 = R2;
type G2 = G2;
type B2 = B2;
type A = A;
type B = B;
type C = C;
type D = D;
type CLK = CLK;
type LAT = LAT;
type OE = OE;
fn r1(&mut self) -> &mut R1 {
&mut self.0
}
fn g1(&mut self) -> &mut G1 {
&mut self.1
}
fn b1(&mut self) -> &mut B1 {
&mut self.2
}
fn r2(&mut self) -> &mut R2 {
&mut self.3
}
fn g2(&mut self) -> &mut G2 {
&mut self.4
}
fn b2(&mut self) -> &mut B2 {
&mut self.5
}
fn a(&mut self) -> &mut A {
&mut self.6
}
fn b(&mut self) -> &mut B {
&mut self.7
}
fn c(&mut self) -> &mut C {
&mut self.8
}
fn d(&mut self) -> &mut D {
&mut self.9
}
fn clk(&mut self) -> &mut CLK {
&mut self.10
}
fn lat(&mut self) -> &mut LAT {
&mut self.11
}
fn oe(&mut self) -> &mut OE {
&mut self.12
}
}
impl<PINS: Outputs> Hub75<PINS> {
/// Create a new hub instance
///
/// Takes an implementation of the Outputs trait;
/// using a tuple `(r1, g1, b1, r2, g2, b2, a, b, c, d, clk, lat, oe)`
/// with every member implementing `OutputPin` is usually the right choice.
///
/// `brightness_bits` provides the number of brightness bits for each color (1-8).
/// More bits allow for many more colors, especially in combination with the gamma correction,
/// but each extra bit doubles the time `output` will take. This might lead to noticeable flicker.
///
/// 3-4 bits are usually a good choice.
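///
/// A minimal usage sketch (the pin and delay values are hypothetical; substitute
/// your HAL's GPIO and delay types):
///
/// ```ignore
/// let pins = (r1, g1, b1, r2, g2, b2, a, b, c, d, clk, lat, oe);
/// let mut display = Hub75::new(pins, 4); // 4 brightness bits
/// loop {
///     display.output(&mut delay)?; // refresh often to avoid flicker
/// }
/// ```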
pub fn new(pins: PINS, brightness_bits: u8) -> Self {
assert!(brightness_bits < 9 && brightness_bits > 0);
let data = [[(0, 0, 0, 0, 0, 0); 64]; 16];
let brightness_step = 1 << (8 - brightness_bits);
let brightness_count = ((1 << brightness_bits as u16) - 1) as u8;
Self {
data,
brightness_step,
brightness_count,
pins,
}
}
/// Output the buffer to the display
///
/// Takes some time and should be called quite often, otherwise the output
/// will flicker
pub fn output<DELAY: DelayUs<u8>>(&mut self, delay: &mut DELAY) -> Result<(), PINS::Error> {
// Enable the output
// The previous last row will continue to display
self.pins.oe().set_low()?;
// PWM cycle
for mut brightness in 0..self.brightness_count {
brightness = (brightness + 1).saturating_mul(self.brightness_step);
for (count, row) in self.data.iter().enumerate() {
for element in row.iter() {
if element.0 >= brightness {
self.pins.r1().set_high()?;
} else {
self.pins.r1().set_low()?;
}
if element.1 >= brightness {
self.pins.g1().set_high()?;
} else {
self.pins.g1().set_low()?;
}
if element.2 >= brightness {
self.pins.b1().set_high()?;
} else {
self.pins.b1().set_low()?;
}
if element.3 >= brightness {
self.pins.r2().set_high()?;
} else {
self.pins.r2().set_low()?;
}
if element.4 >= brightness {
self.pins.g2().set_high()?;
} else {
self.pins.g2().set_low()?;
}
if element.5 >= brightness {
self.pins.b2().set_high()?;
} else {
self.pins.b2().set_low()?;
}
self.pins.clk().set_high()?;
self.pins.clk().set_low()?;
}
self.pins.oe().set_high()?;
// Prevents ghosting, no idea why
delay.delay_us(2);
self.pins.lat().set_low()?;
delay.delay_us(2);
self.pins.lat().set_high()?;
// Select row
if count & 1 != 0 {
self.pins.a().set_high()?;
} else {
self.pins.a().set_low()?;
}
if count & 2 != 0 {
self.pins.b().set_high()?;
} else {
self.pins.b().set_low()?;
}
if count & 4 != 0 {
self.pins.c().set_high()?;
} else |
if count & 8 != 0 {
self.pins.d().set_high()?;
} else {
self.pins.d().set_low()?;
}
delay.delay_us(2);
self.pins.oe().set_low()?;
}
}
// Disable the output
// Prevents one row from being much brighter than the others
self.pins.oe().set_high()?;
Ok(())
}
/// Clear the output
///
/// It's a bit faster than using the embedded_graphics interface
/// to do the same
pub fn clear(&mut self) {
for row in self.data.iter_mut() {
for e in row.iter_mut() {
e.0 = 0;
e.1 = 0;
e.2 = 0;
e.3 = 0;
e.4 = 0;
e.5 = 0;
}
}
}
}
use embedded_graphics::{
drawable::{Dimensions, Pixel},
pixelcolor::Rgb565,
Drawing, SizedDrawing,
};
impl<PINS: Outputs> Drawing<Rgb565> for Hub75<PINS> {
fn draw<T>(&mut self, item_pixels: T)
where
T: IntoIterator<Item = Pixel<Rgb565>>,
{
// This table remaps linear input values
// (the numbers we’d like to use; e.g. 127 = half brightness)
// to nonlinear gamma-corrected output values
// (numbers producing the desired effect on the LED;
// e.g. 36 = half brightness).
const GAMMA8: [u8; 256] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4,
4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11,
12, 12, 13, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22,
22, 23, 24, 24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 32, 32, 33, 34, 35, 35, 36, 37,
38, 39, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 50, 51, 52, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 72, 73, 74, 75, 77, 78, 79, 81, 82, 83, 85,
86, 87, 89, 90, 92, 93, 95, 96, 98, 99, 101, 102, 104, 105, 107, 109, 110, 112, 114,
115, 117, 119, 120, 122, 124, 126, 127, 129, 131, 133, 135, 137, 138, 140, 142, 144,
146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 167, 169, 171, 173, 175, 177, 180,
182, 184, 186, 189, 191, 193, 196, 198, 200, 203, 205, 208, 210, 213, 215, 218, 220,
223, 225, 228, 231, 233, 236, 239, 241, 244, 247, 249, 252, 255,
];
for Pixel(coord, color) in item_pixels {
let row = coord[1] % 16;
let data = &mut self.data[row as usize][coord[0] as usize];
if coord[1] >= 16 {
data.3 = GAMMA8[color.r() as usize];
data.4 = GAMMA8[color.g() as usize];
data.5 = GAMMA8[color.b() as usize];
} else {
data.0 = GAMMA8[color.r() as usize];
data.1 = GAMMA8[color.g() as usize];
data.2 = GAMMA8[color.b() as usize];
}
}
}
}
// TODO Does it make sense to include this?
impl<PINS: Outputs> SizedDrawing<Rgb565> for Hub75<PINS> {
fn draw_sized<T>(&mut self, item_pixels: T)
where
T: IntoIterator<Item = Pixel<Rgb565>> + Dimensions,
{
self.draw(item_pixels);
}
}
| {
self.pins.c().set_low()?;
} |
sw.py | import time
import numpy as np
from testbed._rust import sliding_window
x = np.random.randn(5000, 5) | rustout = sliding_window(x, 100, 1)
print("=" * 50)
print("Rust Speed: ", time.time() - s)
print(rustout.shape)
def sw(array, ws, over):
sl = len(array)
return [array[i:i+ws] for i in range(0, sl-ws, over)]
print("=" * 50)
s = time.time()
tmp = sw(x, 100, 1)
tmp = np.stack(tmp, 0)
print("Python Speed: ", time.time() - s)
print(tmp.shape) |
s = time.time() |
worker.rs | //! Worker thread implementation.
//!
//! The worker thread's main job is to hide the asynchrony of file loading from the main thread.
//! The interactive mode's custom `FileLoader` has to provide file contents synchronously, but
//! actually, obtaining file contents requires sending a request to the client and processing
//! messages until we get a response. That loop happens in the worker thread.
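//!
//! A typical round trip, sketched from the handlers below:
//!
//! 1. The main thread's `FileLoader` sends `ToWorker::NeedFile(path, send)` and
//!    blocks on the paired receiver.
//! 2. The worker forwards `ToClient::GetBufferText { file }` to the client and
//!    stashes the reply sender in `pending_files`.
//! 3. When `ToServer::BufferText { file, content }` arrives, the worker looks up
//!    the pending sender for the canonicalized path and sends the content through
//!    it, unblocking the main thread.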
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
use std::sync::mpsc::{Sender, SyncSender, Receiver};
use interact::{ToServer, ToClient};
pub enum ToWorker {
InputMessage(ToServer),
NeedFile(PathBuf, SyncSender<String>),
}
struct WorkerState {
to_client: SyncSender<ToClient>,
to_main: Sender<ToServer>,
pending_files: HashMap<PathBuf, SyncSender<String>>,
}
impl WorkerState {
fn new(to_client: SyncSender<ToClient>,
to_main: Sender<ToServer>) -> WorkerState {
WorkerState {
to_client: to_client,
to_main: to_main,
pending_files: HashMap::new(),
}
}
fn | (&mut self,
worker_recv: Receiver<ToWorker>) {
for msg in worker_recv.iter() {
self.handle_one(msg);
}
}
fn handle_one(&mut self, msg: ToWorker) {
use self::ToWorker::*;
use super::ToServer::*;
use super::ToClient::*;
match msg {
InputMessage(BufferText { file, content }) => {
info!("got text for file {:?}", file);
let path = fs::canonicalize(&file).unwrap();
let send = match self.pending_files.remove(&path) {
Some(x) => x,
None => {
warn!("got file {:?}, but no request for it is pending", path);
return;
},
};
send.send(content).unwrap();
},
NeedFile(path, send) => {
info!("got request for file {:?}", path);
assert!(!self.pending_files.contains_key(&path));
self.to_client.send(GetBufferText {
file: path.to_string_lossy().into_owned(),
}).unwrap();
self.pending_files.insert(path, send);
},
// Other messages pass through to the main thread. The channels we use are unbounded,
// so if the main thread is busy (most importantly, if it's waiting on some file
// contents), then the message will be queued.
InputMessage(msg) => {
self.to_main.send(msg).unwrap();
},
}
}
}
pub fn run_worker(recv: Receiver<ToWorker>,
to_client: SyncSender<ToClient>,
to_main: Sender<ToServer>) {
let mut state = WorkerState::new(to_client, to_main);
state.run_loop(recv);
}
| run_loop |
buffer_test.go | package xml
import (
"bytes"
"testing"
"github.com/tdewolff/parse/v2/xml"
"github.com/tdewolff/test"
)
func TestBuffer(t *testing.T) {
// 0 12 3 45 6 7 8 9 0
s := `<p><a href="//url">text</a>text<!--comment--></p>`
z := NewTokenBuffer(xml.NewLexer(bytes.NewBufferString(s)))
tok := z.Shift()
test.That(t, string(tok.Text) == "p", "first token is <p>")
test.That(t, z.pos == 0, "shift first token and restore position")
test.That(t, len(z.buf) == 0, "shift first token and restore length")
test.That(t, string(z.Peek(2).Text) == "href", "third token is href")
test.That(t, z.pos == 0, "don't change position after peeking")
test.That(t, len(z.buf) == 3, "three tokens after peeking")
test.That(t, string(z.Peek(8).Text) == "p", "ninth token is <p>")
test.That(t, z.pos == 0, "don't change position after peeking")
test.That(t, len(z.buf) == 9, "nine tokens after peeking") |
_ = z.Shift()
tok = z.Shift()
test.That(t, string(tok.Text) == "a", "third token is <a>")
test.That(t, z.pos == 2, "position is two after two shifts")
} |
test.That(t, z.Peek(9).TokenType == xml.ErrorToken, "tenth token is an error")
test.That(t, z.Peek(9) == z.Peek(10), "tenth and eleventh token are EOF")
test.That(t, len(z.buf) == 10, "ten tokens after peeking") |
module.rs | use crate::{ctx_desc::ContextDescriptor, ptr::RelativeDirectPointerNonNull};
use std::os::raw::c_char;
/// A context descriptor for a module.
#[repr(C)]
#[derive(Clone, Debug)]
pub struct | {
/// The base context descriptor.
pub base: ContextDescriptor,
/// The module name.
pub name: RelativeDirectPointerNonNull<c_char>,
}
| ModuleContextDescriptor |
api_group_list.rs | // Generated from definition io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList
/// APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct APIGroupList {
/// groups is a list of APIGroup.
pub groups: Vec<crate::v1_13::apimachinery::pkg::apis::meta::v1::APIGroup>,
}
impl crate::Resource for APIGroupList {
fn api_version() -> &'static str {
"v1"
}
fn group() -> &'static str {
""
}
fn kind() -> &'static str {
"APIGroupList"
}
fn version() -> &'static str {
"v1"
}
}
impl<'de> serde::Deserialize<'de> for APIGroupList {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_kind,
Key_groups,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"kind" => Field::Key_kind,
"groups" => Field::Key_groups,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = APIGroupList;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "struct APIGroupList")
}
fn | <A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_groups: Option<Vec<crate::v1_13::apimachinery::pkg::apis::meta::v1::APIGroup>> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => {
let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?;
if value_api_version != <Self::Value as crate::Resource>::api_version() {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version()));
}
},
Field::Key_kind => {
let value_kind: String = serde::de::MapAccess::next_value(&mut map)?;
if value_kind != <Self::Value as crate::Resource>::kind() {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind()));
}
},
Field::Key_groups => value_groups = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(APIGroupList {
groups: value_groups.ok_or_else(|| serde::de::Error::missing_field("groups"))?,
})
}
}
deserializer.deserialize_struct(
"APIGroupList",
&[
"apiVersion",
"kind",
"groups",
],
Visitor,
)
}
}
impl serde::Serialize for APIGroupList {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"APIGroupList",
3,
)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?;
serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?;
serde::ser::SerializeStruct::serialize_field(&mut state, "groups", &self.groups)?;
serde::ser::SerializeStruct::end(state)
}
}
| visit_map |
wsgi.py | """
WSGI config for LoginAndRegistration project.
It exposes the WSGI callable as a module-level variable named ``application``. |
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LoginAndRegistration.settings")
application = get_wsgi_application() | |
test_arrays.py |
from symarray.calculus.integers import Integer
from symarray.calculus.arrays import Array
from symarray.shape import NDShape
def | ():
a = Array('a')
b = Array('b', shape=NDShape((Integer('n1'), Integer('n2'))))
n = Integer('n')
expr = a+n*a+2+b
print(expr.shape)
print(expr[1])
| test_basic |
dec.rs | //! Check declarations and expressions.
use crate::ast::{Cases, DatBind, Dec, ExBindInner, Exp, Label, Long, TyBind};
use crate::intern::StrRef;
use crate::loc::Located;
use crate::statics::ck::util::{
env_ins, env_merge, generalize, get_env, get_ty_sym, get_val_info, insert_ty_vars, instantiate,
};
use crate::statics::ck::{exhaustive, pat, ty};
use crate::statics::types::{
Cx, Env, Error, Item, Pat, Result, State, StrEnv, Ty, TyEnv, TyInfo, TyScheme, TyVar, Tys,
ValEnv, ValInfo,
};
use std::collections::{BTreeMap, HashMap, HashSet};
fn ck_exp(cx: &Cx, st: &mut State, exp: &Located<Exp<StrRef>>) -> Result<Ty> {
// The special constants are as per SML Definition (1). Note that SML Definition (5) is handled by
// the parser and SML Definition (7) is handled by having atomic and non-atomic expressions be
// part of the same enum.
match &exp.val {
Exp::DecInt(_) | Exp::HexInt(_) => Ok(Ty::INT),
Exp::DecWord(_) | Exp::HexWord(_) => Ok(Ty::WORD),
Exp::Real(_) => Ok(Ty::REAL),
Exp::String(_) => Ok(Ty::STRING),
Exp::Char(_) => Ok(Ty::CHAR),
// SML Definition (2). Note that Subst, instantiate, generalize, unify, etc are all borne from
// the comment on this rule: "The instantiation of type schemes allows different occurrences of
// a single longvid to assume different types."
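// For example, in `(rev [1], rev ["a"])` the two occurrences of `rev`
// instantiate to `int list -> int list` and `string list -> string list`.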
Exp::LongVid(vid) => {
let val_info = get_val_info(get_env(&cx.env, vid)?, vid.last)?;
Ok(instantiate(st, &val_info.ty_scheme))
}
// SML Definition (3)
Exp::Record(rows) => {
let mut ty_rows = BTreeMap::new();
// SML Definition (6)
for row in rows {
let ty = ck_exp(cx, st, &row.val)?;
if ty_rows.insert(row.lab.val, ty).is_some() {
return Err(row.lab.loc.wrap(Error::DuplicateLabel(row.lab.val)));
}
}
Ok(Ty::Record(ty_rows))
}
Exp::Select(..) => Err(exp.loc.wrap(Error::Todo("record selectors"))),
// SML Definition Appendix A - tuples are sugar for records
Exp::Tuple(exps) => {
let mut ty_rows = BTreeMap::new();
for (idx, exp) in exps.iter().enumerate() {
let ty = ck_exp(cx, st, exp)?;
assert!(ty_rows.insert(Label::tuple(idx), ty).is_none());
}
Ok(Ty::Record(ty_rows))
}
// SML Definition Appendix A - lists are sugar for cons + nil
Exp::List(exps) => {
let elem = Ty::Var(st.new_ty_var(false));
for exp in exps {
let ty = ck_exp(cx, st, exp)?;
st.unify(exp.loc, elem.clone(), ty)?;
}
Ok(Ty::list(elem))
}
// SML Definition Appendix A - sequences ignore all but the last expression
Exp::Sequence(exps) => {
let mut ret = None;
for exp in exps {
ret = Some(ck_exp(cx, st, exp)?);
}
Ok(ret.unwrap())
}
// SML Definition (4)
Exp::Let(dec, exps) => {
let gen_syms = st.generated_syms();
let env = ck(cx, st, dec)?;
let mut cx = cx.clone();
cx.o_plus(env);
let mut last = None;
for exp in exps {
last = Some((exp.loc, ck_exp(&cx, st, exp)?));
}
let (loc, mut ty) = last.unwrap();
ty.apply(&st.subst);
if !gen_syms.contains(&ty.ty_names()) {
return Err(loc.wrap(Error::TyNameEscape));
}
Ok(ty)
}
// SML Definition (8)
Exp::App(func, arg) => {
let func_ty = ck_exp(cx, st, func)?;
let arg_ty = ck_exp(cx, st, arg)?;
// we don't actually _need_ to case on func_ty, since the Var case is actually correct for
// _all_ types. we just do this to produce better error messages in the Record and Ctor cases.
match func_ty {
Ty::Var(tv) => {
if st.subst.is_bound(&tv) {
Err(exp.loc.wrap(Error::NotArrowTy(func_ty)))
} else {
let ret_ty = Ty::Var(st.new_ty_var(false));
let arrow_ty = Ty::Arrow(arg_ty.into(), ret_ty.clone().into());
st.unify(exp.loc, func_ty, arrow_ty)?;
Ok(ret_ty)
}
}
Ty::Arrow(func_arg_ty, func_ret_ty) => {
st.unify(exp.loc, *func_arg_ty, arg_ty)?;
Ok(*func_ret_ty)
}
Ty::Record(_) | Ty::Ctor(_, _) => Err(exp.loc.wrap(Error::NotArrowTy(func_ty))),
}
}
// SML Definition (8). Infix application is the same as `op`ing the infix operator and applying
// it to a tuple (lhs, rhs).
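// For example, `x + y` is checked as if it were `(op +) (x, y)`.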
Exp::InfixApp(lhs, func, rhs) => {
let val_info = get_val_info(&cx.env, *func)?;
let func_ty = instantiate(st, &val_info.ty_scheme);
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
let ret_ty = Ty::Var(st.new_ty_var(false));
let arrow_ty = Ty::Arrow(Ty::pair(lhs_ty, rhs_ty).into(), ret_ty.clone().into());
st.unify(exp.loc, func_ty, arrow_ty)?;
Ok(ret_ty)
}
// SML Definition (9)
Exp::Typed(inner, ty) => {
let exp_ty = ck_exp(cx, st, inner)?;
let ty_ty = ty::ck(cx, &st.tys, ty)?;
st.unify(exp.loc, ty_ty, exp_ty.clone())?;
Ok(exp_ty)
}
// SML Definition Appendix A - boolean operators are sugar for `if`
Exp::Andalso(lhs, rhs) | Exp::Orelse(lhs, rhs) => {
let lhs_ty = ck_exp(cx, st, lhs)?;
let rhs_ty = ck_exp(cx, st, rhs)?;
st.unify(lhs.loc, Ty::BOOL, lhs_ty)?;
st.unify(rhs.loc, Ty::BOOL, rhs_ty)?;
Ok(Ty::BOOL)
}
// SML Definition (10)
Exp::Handle(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_handle(pats)?;
st.unify(exp.loc, Ty::EXN, arg_ty)?;
st.unify(exp.loc, head_ty.clone(), res_ty)?;
Ok(head_ty)
}
// SML Definition (11)
Exp::Raise(exp) => {
let exp_ty = ck_exp(cx, st, exp)?;
st.unify(exp.loc, Ty::EXN, exp_ty)?;
Ok(Ty::Var(st.new_ty_var(false)))
}
// SML Definition Appendix A - `if` is sugar for casing
Exp::If(cond, then_e, else_e) => {
let cond_ty = ck_exp(cx, st, cond)?;
let then_ty = ck_exp(cx, st, then_e)?;
let else_ty = ck_exp(cx, st, else_e)?;
st.unify(cond.loc, Ty::BOOL, cond_ty)?;
st.unify(exp.loc, then_ty.clone(), else_ty)?;
Ok(then_ty)
}
Exp::While(..) => Err(exp.loc.wrap(Error::Todo("`while`"))),
// SML Definition Appendix A - `case` is sugar for application to a `fn`
Exp::Case(head, cases) => {
let head_ty = ck_exp(cx, st, head)?;
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
st.unify(exp.loc, head_ty, arg_ty)?;
Ok(res_ty)
}
// SML Definition (12)
Exp::Fn(cases) => {
let (pats, arg_ty, res_ty) = ck_cases(cx, st, cases)?;
exhaustive::ck_match(pats, exp.loc)?;
Ok(Ty::Arrow(arg_ty.into(), res_ty.into()))
}
}
}
/// SML Definition (13)
fn ck_cases(cx: &Cx, st: &mut State, cases: &Cases<StrRef>) -> Result<(Vec<Located<Pat>>, Ty, Ty)> {
let arg_ty = Ty::Var(st.new_ty_var(false));
let res_ty = Ty::Var(st.new_ty_var(false));
let mut pats = Vec::with_capacity(cases.arms.len());
// SML Definition (14)
for arm in cases.arms.iter() {
let (val_env, pat_ty, pat) = pat::ck(cx, st, &arm.pat)?;
pats.push(arm.pat.loc.wrap(pat));
let mut cx = cx.clone();
cx.env.val_env.extend(val_env);
let exp_ty = ck_exp(&cx, st, &arm.exp)?;
st.unify(arm.pat.loc, arg_ty.clone(), pat_ty)?;
st.unify(arm.exp.loc, res_ty.clone(), exp_ty)?;
}
Ok((pats, arg_ty, res_ty))
}
/// Returns `Ok(())` iff `name` is not a forbidden binding name. TODO there are more of these in
/// certain situations
fn | (name: Located<StrRef>) -> Result<()> {
let val = name.val;
if val == StrRef::TRUE
|| val == StrRef::FALSE
|| val == StrRef::NIL
|| val == StrRef::CONS
|| val == StrRef::REF
{
return Err(name.loc.wrap(Error::ForbiddenBinding(name.val)));
}
Ok(())
}
struct FunInfo {
args: Vec<TyVar>,
ret: TyVar,
}
fn fun_infos_to_ve(fun_infos: &HashMap<StrRef, FunInfo>) -> ValEnv {
fun_infos
.iter()
.map(|(&name, fun_info)| {
let ty = fun_info
.args
.iter()
.rev()
.fold(Ty::Var(fun_info.ret), |ac, &tv| {
Ty::Arrow(Ty::Var(tv).into(), ac.into())
});
(name, ValInfo::val(TyScheme::mono(ty)))
})
.collect()
}
pub fn ck(cx: &Cx, st: &mut State, dec: &Located<Dec<StrRef>>) -> Result<Env> {
match &dec.val {
// SML Definition (15)
Dec::Val(ty_vars, val_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut val_env = ValEnv::new();
// SML Definition (25)
for val_bind in val_binds {
// SML Definition (26)
if val_bind.rec {
return Err(dec.loc.wrap(Error::Todo("recursive val binds")));
}
let (other, pat_ty, pat) = pat::ck(cx, st, &val_bind.pat)?;
for &name in other.keys() {
ck_binding(val_bind.pat.loc.wrap(name))?;
}
let exp_ty = ck_exp(cx, st, &val_bind.exp)?;
st.unify(dec.loc, pat_ty.clone(), exp_ty)?;
exhaustive::ck_bind(pat, val_bind.pat.loc)?;
for (name, mut val_info) in other {
generalize(cx, st, ty_vars, &mut val_info.ty_scheme);
let name = val_bind.pat.loc.wrap(name);
env_ins(&mut val_env, name, val_info, Item::Val)?;
}
}
Ok(val_env.into())
}
// SML Definition Appendix A - `fun` is sugar for `val rec` and `case`
Dec::Fun(ty_vars, fval_binds) => {
let mut cx_cl;
let cx = if ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, ty_vars)?;
&cx_cl
};
let mut fun_infos = HashMap::with_capacity(fval_binds.len());
for fval_bind in fval_binds {
let first = fval_bind.cases.first().unwrap();
let info = FunInfo {
args: first.pats.iter().map(|_| st.new_ty_var(false)).collect(),
ret: st.new_ty_var(false),
};
// copied from env_ins in util
if fun_infos.insert(first.vid.val, info).is_some() {
let err = Error::Duplicate(Item::Val, first.vid.val);
return Err(first.vid.loc.wrap(err));
}
}
for fval_bind in fval_binds {
let name = fval_bind.cases.first().unwrap().vid.val;
let info = fun_infos.get(&name).unwrap();
let mut arg_pats = Vec::with_capacity(fval_bind.cases.len());
for case in fval_bind.cases.iter() {
if name != case.vid.val {
let err = Error::FunDecNameMismatch(name, case.vid.val);
return Err(case.vid.loc.wrap(err));
}
if info.args.len() != case.pats.len() {
let err = Error::FunDecWrongNumPats(info.args.len(), case.pats.len());
let begin = case.pats.first().unwrap().loc;
let end = case.pats.last().unwrap().loc;
return Err(begin.span(end).wrap(err));
}
let mut pats_val_env = ValEnv::new();
let mut arg_pat = Vec::with_capacity(info.args.len());
for (pat, &tv) in case.pats.iter().zip(info.args.iter()) {
let (ve, pat_ty, new_pat) = pat::ck(cx, st, pat)?;
st.unify(pat.loc, Ty::Var(tv), pat_ty)?;
env_merge(&mut pats_val_env, ve, pat.loc, Item::Val)?;
arg_pat.push(new_pat);
}
let begin = case.pats.first().unwrap().loc;
let end = case.pats.last().unwrap().loc;
arg_pats.push(begin.span(end).wrap(Pat::record(arg_pat)));
if let Some(ty) = &case.ret_ty {
let new_ty = ty::ck(cx, &st.tys, ty)?;
st.unify(ty.loc, Ty::Var(info.ret), new_ty)?;
}
let mut cx = cx.clone();
// no dupe checking here - intentionally shadow.
cx.env.val_env.extend(fun_infos_to_ve(&fun_infos));
cx.env.val_env.extend(pats_val_env);
let body_ty = ck_exp(&cx, st, &case.body)?;
st.unify(case.body.loc, Ty::Var(info.ret), body_ty)?;
}
let begin = fval_bind.cases.first().unwrap().vid.loc;
let end = fval_bind.cases.last().unwrap().body.loc;
exhaustive::ck_match(arg_pats, begin.span(end))?;
}
let mut val_env = fun_infos_to_ve(&fun_infos);
for val_info in val_env.values_mut() {
generalize(cx, st, ty_vars, &mut val_info.ty_scheme);
}
Ok(val_env.into())
}
// SML Definition (16)
Dec::Type(ty_binds) => ck_ty_binds(cx, st, ty_binds),
// SML Definition (17)
Dec::Datatype(dat_binds, ty_binds) => {
let mut env = ck_dat_binds(cx.clone(), st, dat_binds)?;
// SML Definition Appendix A - `datatype withtype` is sugar for `datatype; type`
let mut cx = cx.clone();
cx.o_plus(env.clone());
env.extend(ck_ty_binds(&cx, st, ty_binds)?);
Ok(env)
}
// SML Definition (18)
Dec::DatatypeCopy(ty_con, long) => ck_dat_copy(cx, &st.tys, *ty_con, long),
// SML Definition (19)
Dec::Abstype(..) => Err(dec.loc.wrap(Error::Todo("`abstype`"))),
// SML Definition (20)
Dec::Exception(ex_binds) => {
let mut val_env = ValEnv::new();
for ex_bind in ex_binds {
let val_info = match &ex_bind.inner {
// SML Definition (30)
ExBindInner::Ty(ty) => match ty {
None => ValInfo::exn(),
Some(ty) => ValInfo::exn_fn(ty::ck(cx, &st.tys, ty)?),
},
// SML Definition (31)
ExBindInner::Long(vid) => {
let val_info = get_val_info(get_env(&cx.env, vid)?, vid.last)?;
if !val_info.id_status.is_exn() {
return Err(vid.loc().wrap(Error::ExnWrongIdStatus(val_info.id_status)));
}
val_info.clone()
}
};
env_ins(&mut val_env, ex_bind.vid, val_info, Item::Val)?;
}
Ok(val_env.into())
}
// SML Definition (21)
Dec::Local(fst, snd) => {
let fst_env = ck(cx, st, fst)?;
let mut cx = cx.clone();
cx.o_plus(fst_env);
ck(&cx, st, snd)
}
// SML Definition (22)
Dec::Open(longs) => {
let mut env = Env::default();
for long in longs {
env.extend(get_env(&cx.env, long)?.clone());
}
Ok(env)
}
// SML Definition (23), SML Definition (24)
Dec::Seq(decs) => {
let mut cx = cx.clone();
let mut ret = Env::default();
for dec in decs {
cx.o_plus(ret.clone());
let env = ck(&cx, st, dec)?;
ret.extend(env);
}
Ok(ret)
}
Dec::Infix(..) | Dec::Infixr(..) | Dec::Nonfix(..) => Ok(Env::default()),
}
}
/// SML Definition (16)
fn ck_ty_binds(cx: &Cx, st: &mut State, ty_binds: &[TyBind<StrRef>]) -> Result<Env> {
let mut ty_env = TyEnv::default();
// SML Definition (27)
for ty_bind in ty_binds {
let mut cx_cl;
let cx = if ty_bind.ty_vars.is_empty() {
cx
} else {
cx_cl = cx.clone();
insert_ty_vars(&mut cx_cl, st, &ty_bind.ty_vars)?;
&cx_cl
};
let ty = ty::ck(cx, &st.tys, &ty_bind.ty)?;
let sym = st.new_sym(ty_bind.ty_con);
env_ins(&mut ty_env.inner, ty_bind.ty_con, sym, Item::Ty)?;
// TODO better equality checks
let equality = ty.is_equality(&st.tys);
let info = TyInfo {
ty_fcn: TyScheme {
ty_vars: ty_bind
.ty_vars
.iter()
.map(|tv| {
let tv = *cx.ty_vars.get(&tv.val).unwrap();
st.subst.remove_bound(&tv);
tv
})
.collect(),
ty,
overload: None,
},
val_env: ValEnv::new(),
equality,
};
st.tys.insert(sym, info);
}
Ok(ty_env.into())
}
/// SML Definition (17), SML Definition (71). The checking for {datatype, constructor} {bindings,
/// descriptions} appear to be essentially identical, so we can unite the ASTs and static checking
/// functions (i.e. this function).
pub fn ck_dat_binds(mut cx: Cx, st: &mut State, dat_binds: &[DatBind<StrRef>]) -> Result<Env> {
// these two are across all `DatBind`s.
let mut ty_env = TyEnv::default();
let mut val_env = ValEnv::new();
// we must first generate new symbols for _all_ the types being defined, since they are allowed to
// reference each other. (apparently? according to SML NJ, but it seems like the Definition does
// not indicate this, according to my reading of e.g. SML Definition (28).)
let mut syms = Vec::new();
for dat_bind in dat_binds {
// create a new symbol for the type being generated with this `DatBind`.
let sym = st.new_sym(dat_bind.ty_con);
// tell the original context, as well as the overall `TyEnv` that we return, that this new
// datatype does exist, but tell the State that it has just an empty `ValEnv`. also perform dupe
// checking on the name of the new type and assert for sanity checking after the dupe check.
env_ins(&mut ty_env.inner, dat_bind.ty_con, sym, Item::Ty)?;
// no assert is_none since we may be shadowing something from an earlier Dec in this Cx.
cx.env.ty_env.inner.insert(dat_bind.ty_con.val, sym);
// no mapping from ast ty vars to statics ty vars here. we just need some ty vars to make the
// `TyScheme`. pretty much copied from `insert_ty_vars`.
let mut set = HashSet::new();
let mut ty_vars = Vec::new();
for tv in dat_bind.ty_vars.iter() {
if !set.insert(tv.val.name) {
return Err(tv.loc.wrap(Error::Duplicate(Item::TyVar, tv.val.name)));
}
let new_tv = st.new_ty_var(tv.val.equality);
ty_vars.push(new_tv);
// no need to `insert_bound` because no unifying occurs.
}
let ty_args: Vec<_> = ty_vars.iter().copied().map(Ty::Var).collect();
let ty_fcn = TyScheme {
ty_vars,
ty: Ty::Ctor(ty_args, sym),
overload: None,
};
st.tys.insert_datatype(sym, ty_fcn);
syms.push(sym);
}
// SML Definition (28), SML Definition (81)
for (dat_bind, sym) in dat_binds.iter().zip(syms) {
// note that we have to `get` here and then `get_mut` again later because of the borrow checker.
let ty_fcn = &st.tys.get(&sym).ty_fcn;
let mut cx_cl;
let cx = if dat_bind.ty_vars.is_empty() {
&cx
} else {
// it is here that we introduce the mapping from ast ty vars to statics ty vars. we need to do
// that in order to check the `ConBind`s. but we cannot introduce the mapping earlier, when we
// were generating the statics ty vars and the `Sym`s, because there may be multiple
// identically-named ty vars in different `DatBind`s.
//
// if we wanted we could generate new statics type variables here, but then we'd have to use
// those new type variables in the return type of the ctor. it shouldn't matter whether we
// generate new type variables here or not (as mentioned, we choose to not) because both the
// type function and the ctors of the type will each have a `TyScheme` that binds the type
// variables appropriately, so by the magic of alpha conversion they're all distinct anyway.
cx_cl = cx.clone();
assert_eq!(dat_bind.ty_vars.len(), ty_fcn.ty_vars.len());
for (ast_tv, &tv) in dat_bind.ty_vars.iter().zip(ty_fcn.ty_vars.iter()) {
cx_cl.ty_vars.insert(ast_tv.val, tv);
}
&cx_cl
};
// this ValEnv is specific to this `DatBind`.
let mut bind_val_env = ValEnv::new();
let mut equality = true;
// SML Definition (29), SML Definition (82)
for con_bind in dat_bind.cons.iter() {
ck_binding(con_bind.vid)?;
// if there is no `of t`, then the type of the ctor is just `T`, where `T` is the new sym type
// that is being defined.
let mut ty = ty_fcn.ty.clone();
if let Some(arg_ty) = &con_bind.ty {
// if there is an `of t`, then the type of the ctor is `t -> T`. we must also update whether
// `T` respects equality based on whether `t` does. TODO this doesn't handle the equality
// check correctly.
let t = ty::ck(cx, &st.tys, arg_ty)?;
equality = equality && t.is_equality(&st.tys);
ty = Ty::Arrow(t.into(), ty.into());
}
let val_info = ValInfo::ctor(TyScheme {
ty_vars: ty_fcn.ty_vars.clone(),
ty,
overload: None,
});
// insert the `ValInfo` into the _overall_ `ValEnv` with dupe checking.
env_ins(&mut val_env, con_bind.vid, val_info.clone(), Item::Val)?;
// _also_ insert the `ValInfo` into the `DatBind`-specific `ValEnv`, but this time dupe
// checking is unnecessary (just assert as a sanity check).
assert!(bind_val_env.insert(con_bind.vid.val, val_info).is_none());
}
// now the `ValEnv` is complete, so we may update `st.tys` with the true definition of this
// datatype.
st.tys.finish_datatype(&sym, bind_val_env, equality);
}
Ok(Env {
ty_env,
val_env,
str_env: StrEnv::new(),
})
}
/// SML Definition (18), SML Definition (72)
pub fn ck_dat_copy(
cx: &Cx,
tys: &Tys,
ty_con: Located<StrRef>,
long: &Long<StrRef>,
) -> Result<Env> {
let sym = get_ty_sym(get_env(&cx.env, long)?, long.last)?;
let val_env = tys.get(&sym).val_env.clone();
if val_env.is_empty() {
return Err(long.loc().wrap(Error::DatatypeCopyNotDatatype));
}
Ok(Env {
str_env: StrEnv::new(),
ty_env: TyEnv {
inner: BTreeMap::from([(ty_con.val, sym)]),
},
val_env,
})
}
| ck_binding |
test_forms.py | from django.test import TestCase
from django.contrib.auth.models import User
from users.forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
class TestForms(TestCase):
def test_user_register_form_valid(self):
form = UserRegisterForm(
data = {
'username' :"user2",
'email' : "[email protected]",
'password1' : "user2password",
'password2' : "user2password"
}
)
self.assertTrue(form.is_valid())
def | (self):
form = UserRegisterForm(
data = {
'username' :"user2",
'email' : "",
'password1' : "user1password",
'password2' : "user2password"
}
)
self.assertFalse(form.is_valid())
def test_user_update_form_valid(self):
form = UserUpdateForm(
data = {
'username' :"user2",
'email' : "[email protected]"
}
)
self.assertTrue(form.is_valid())
def test_user_update_form_invalid(self):
form = UserUpdateForm(
data = {
'username' :"user2",
'email' : ""
}
)
self.assertFalse(form.is_valid())
def test_user_profile_update_form_invalid(self):
form = UserUpdateForm(
data = {
'image' :""
}
)
self.assertFalse(form.is_valid()) | test_user_register_form_invalid |
fmt.rs | use std::fmt;
struct ANSIString {
color_code: Option<&'static str>,
s: String,
}
impl ANSIString {
pub fn new<S: Into<String>>(color_code: &'static str, s: S) -> Self {
Self {
color_code: Some(color_code),
s: s.into(),
}
}
// don't set any colors
pub fn from<S: Into<String>>(s: S) -> Self {
Self {
color_code: None,
s: s.into(),
}
} | }
impl fmt::Display for ANSIString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(color_code) = self.color_code {
write!(
f,
"{}{}{}",
color_code,
&self.s,
classicube_helpers::color::WHITE
)
} else {
write!(f, "{}", &self.s,)
}
}
}
enum Color {
Green,
RedBold,
Yellow,
}
impl Color {
pub fn paint(&self, s: &str) -> ANSIString {
let color_code = match self {
Color::Green => classicube_helpers::color::LIME,
Color::RedBold => classicube_helpers::color::RED,
Color::Yellow => classicube_helpers::color::YELLOW,
};
ANSIString::new(color_code, s)
}
}
#[doc(hidden)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ColorWhen {
Auto,
Always,
Never,
}
#[doc(hidden)]
pub struct ColorizerOption {
pub use_stderr: bool,
pub when: ColorWhen,
}
#[doc(hidden)]
pub struct Colorizer {
when: ColorWhen,
}
macro_rules! color {
($_self:ident, $c:ident, $m:expr) => {
match $_self.when {
ColorWhen::Auto => Format::$c($m),
ColorWhen::Always => Format::$c($m),
ColorWhen::Never => Format::None($m),
}
};
}
impl Colorizer {
pub fn new(option: ColorizerOption) -> Colorizer { Colorizer { when: option.when } }
pub fn good<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::good;");
color!(self, Good, msg)
}
pub fn warning<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::warning;");
color!(self, Warning, msg)
}
pub fn error<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::error;");
color!(self, Error, msg)
}
pub fn none<T>(&self, msg: T) -> Format<T>
where
T: fmt::Display + AsRef<str>,
{
debugln!("Colorizer::none;");
Format::None(msg)
}
}
impl Default for Colorizer {
fn default() -> Self {
Colorizer::new(ColorizerOption {
use_stderr: true,
when: ColorWhen::Auto,
})
}
}
/// Defines styles for different types of error messages. Defaults to Error=Red, Warning=Yellow,
/// and Good=Green
#[derive(Debug)]
#[doc(hidden)]
pub enum Format<T> {
/// Defines the style used for errors, defaults to Red
Error(T),
/// Defines the style used for warnings, defaults to Yellow
Warning(T),
/// Defines the style used for good values, defaults to Green
Good(T),
/// Defines no formatting style
None(T),
}
impl<T: AsRef<str>> Format<T> {
fn format(&self) -> ANSIString {
match *self {
Format::Error(ref e) => Color::RedBold.paint(e.as_ref()),
Format::Warning(ref e) => Color::Yellow.paint(e.as_ref()),
Format::Good(ref e) => Color::Green.paint(e.as_ref()),
Format::None(ref e) => ANSIString::from(e.as_ref()),
}
}
}
impl<T: AsRef<str>> fmt::Display for Format<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", &self.format()) }
}
#[cfg(all(test, feature = "color"))]
mod test {
use super::*;
#[test]
fn colored_output() {
let err = Format::Error("error");
assert_eq!(
&*format!("{}", err),
&*format!("{}", Color::RedBold.paint("error"))
);
let good = Format::Good("good");
assert_eq!(
&*format!("{}", good),
&*format!("{}", Color::Green.paint("good"))
);
let warn = Format::Warning("warn");
assert_eq!(
&*format!("{}", warn),
&*format!("{}", Color::Yellow.paint("warn"))
);
let none = Format::None("none");
assert_eq!(
&*format!("{}", none),
&*format!("{}", ANSIString::from("none"))
);
}
} | |
check.go | package sched
import (
"fmt"
"math"
"sort"
"time"
"bosun.org/cmd/bosun/cache"
"bosun.org/cmd/bosun/conf"
"bosun.org/cmd/bosun/expr"
"bosun.org/collect"
"bosun.org/metadata"
"bosun.org/models"
"bosun.org/opentsdb"
"bosun.org/slog"
"github.com/MiniProfiler/go/miniprofiler"
)
func init() {
metadata.AddMetricMeta(
"bosun.alerts.current_severity", metadata.Gauge, metadata.Alert,
"The number of open alerts by current severity.")
metadata.AddMetricMeta(
"bosun.alerts.last_abnormal_severity", metadata.Gauge, metadata.Alert,
"The number of open alerts by last abnormal severity.")
metadata.AddMetricMeta(
"bosun.alerts.acknowledgement_status", metadata.Gauge, metadata.Alert,
"The number of open alerts by acknowledgement status.")
metadata.AddMetricMeta(
"bosun.alerts.active_status", metadata.Gauge, metadata.Alert,
"The number of open alerts by active status.")
metadata.AddMetricMeta("alerts.acknowledgement_status_by_notification", metadata.Gauge, metadata.Alert,
"The number of alerts by acknowledgement status and notification. Does not reflect escalation chains.")
metadata.AddMetricMeta("alerts.oldest_unacked_by_notification", metadata.Gauge, metadata.Second,
"How old the oldest unacknowledged notification is by notification.. Does not reflect escalation chains.")
collect.AggregateMeta("bosun.template.render", metadata.MilliSecond, "The amount of time it takes to render the specified alert template.")
}
func NewIncident(ak models.AlertKey) *models.IncidentState {
s := &models.IncidentState{}
s.Start = utcNow()
s.AlertKey = ak
s.Alert = ak.Name()
s.Tags = ak.Group().Tags()
s.Result = &models.Result{}
return s
}
type RunHistory struct {
Cache *cache.Cache
Start time.Time
Backends *expr.Backends
Events map[models.AlertKey]*models.Event
schedule *Schedule
}
// AtTime creates a new RunHistory starting at t with the same context and
// events as rh.
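// For example (illustrative): rh.AtTime(rh.Start.Add(-5 * time.Minute)) returns
// a copy of rh whose Start is five minutes earlier, sharing the same Events map.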
func (rh *RunHistory) AtTime(t time.Time) *RunHistory {
n := *rh
n.Start = t
return &n
}
func (s *Schedule) NewRunHistory(start time.Time, cache *cache.Cache) *RunHistory {
r := &RunHistory{
Cache: cache,
Start: start,
Events: make(map[models.AlertKey]*models.Event),
schedule: s,
Backends: &expr.Backends{
TSDBContext: s.SystemConf.GetTSDBContext(),
GraphiteContext: s.SystemConf.GetGraphiteContext(),
InfluxConfig: s.SystemConf.GetInfluxContext(),
ElasticHosts: s.SystemConf.GetElasticContext(),
AzureMonitor: s.SystemConf.GetAzureMonitorContext(),
PromConfig: s.SystemConf.GetPromContext(),
},
}
return r
}
// RunHistory processes an event history and triggers notifications if needed.
func (s *Schedule) RunHistory(r *RunHistory) {
checkNotify := false
silenced := s.Silenced()
for ak, event := range r.Events {
shouldNotify, err := s.runHistory(r, ak, event, silenced)
checkNotify = checkNotify || shouldNotify
if err != nil {
slog.Errorf("Error in runHistory for %s. %s.", ak, err)
}
}
if checkNotify && s.nc != nil {
select {
case s.nc <- true:
default:
}
}
}
// RunHistory for a single alert key. Returns true if notifications were altered.
func (s *Schedule) runHistory(r *RunHistory, ak models.AlertKey, event *models.Event, silenced SilenceTester) (checkNotify bool, err error) {
event.Time = r.Start
a := s.RuleConf.GetAlert(ak.Name())
if a.UnknownsNormal && event.Status == models.StUnknown {
event.Status = models.StNormal
}
data := s.DataAccess.State()
err = data.TouchAlertKey(ak, utcNow())
if err != nil {
return
}
si := silenced(ak)
// get existing open incident if exists
var incident *models.IncidentState
rt := &models.RenderedTemplates{}
incident, err = data.GetOpenIncident(ak)
if err != nil {
return
}
defer func() {
// save unless incident is new and closed (log alert)
if incident != nil && (incident.Id != 0 || incident.Open) {
_, err = data.UpdateIncidentState(incident)
if err != nil {
return
}
err = data.SetRenderedTemplates(incident.Id, rt)
} else {
err = data.SetUnevaluated(ak, event.Unevaluated) // if nothing to save, at least store the unevaluated state
if err != nil {
return
}
}
}()
if incident != nil {
rt, err = data.GetRenderedTemplates(incident.Id)
if err != nil {
return
}
for i, action := range incident.Actions {
if action.Type == models.ActionDelayedClose && !(action.Fullfilled || action.Cancelled) {
if event.Status > incident.WorstStatus {
// If the lifetime severity of the incident has increased, cancel the delayed close
err = s.ActionByAlertKey("bosun", "cancelled delayed close due to severity increase", models.ActionCancelClose, nil, ak)
if err != nil {
return
}
incident, err = data.GetIncidentState(incident.Id)
if err != nil {
return
}
// Continue processing alert after cancelling the delayed close
break
}
if action.Deadline == nil {
err = fmt.Errorf("should not be here - cancelled close without deadline")
return
}
if r.Start.Before(*action.Deadline) {
if event.Status == models.StNormal {
slog.Infof("closing alert %v on delayed close because the alert has returned to normal before deadline", incident.AlertKey)
if event.Status != incident.CurrentStatus {
incident.Events = append(incident.Events, *event)
}
incident.CurrentStatus = event.Status
// Action needs to know it is normal, so update the incident that action will read
_, err = data.UpdateIncidentState(incident)
if err != nil {
return
}
err = s.ActionByAlertKey("bosun", fmt.Sprintf("close on behalf of delayed close by %v", action.User), models.ActionClose, nil, ak)
if err != nil {
return
}
incident, err = data.GetIncidentState(incident.Id)
if err != nil {
return
}
incident.Actions[i].Fullfilled = true
return
}
} else {
// We are after Deadline
slog.Infof("force closing alert %v on delayed close because the alert is after the deadline", incident.AlertKey)
incident.Actions[i].Fullfilled = true
err = s.ActionByAlertKey("bosun", fmt.Sprintf("forceclose on behalf of delayed close by %v", action.User), models.ActionForceClose, nil, ak)
if err != nil {
return
}
incident, err = data.GetIncidentState(incident.Id)
if err != nil {
return
}
return
}
}
}
}
// If nothing is out of the ordinary we are done
if event.Status <= models.StNormal && incident == nil {
return
}
// if event is unevaluated, we are done also.
if incident != nil {
incident.Unevaluated = event.Unevaluated
}
if event.Unevaluated {
return
}
shouldNotify := false
newIncident := false
if incident == nil {
incident = NewIncident(ak)
newIncident = true
shouldNotify = true
}
// set state.Result according to event result
if event.Status == models.StCritical {
incident.Result = event.Crit
} else if event.Status == models.StWarning {
incident.Result = event.Warn
}
if event.Status > models.StNormal {
incident.LastAbnormalStatus = event.Status
incident.LastAbnormalTime = models.Epoch{Time: event.Time.UTC()}
}
if event.Status > incident.WorstStatus {
incident.WorstStatus = event.Status
shouldNotify = true
}
if event.Status != incident.CurrentStatus {
incident.Events = append(incident.Events, *event)
}
incident.CurrentStatus = event.Status
//run a preliminary save on new incidents to get an id
if newIncident {
if a.Log || silencedOrIgnored(a, event, si) {
//a log or silenced/ignored alert will not need to be saved
} else {
daState := s.DataAccess.State()
incident.Id, err = daState.UpdateIncidentState(incident)
if err != nil {
return
}
previousIds := []int64{}
previousIds, err = daState.GetAllIncidentIdsByAlertKey(ak)
if err != nil {
return
}
for _, id := range previousIds {
if incident.Id > id {
incident.PreviousIds = append(incident.PreviousIds, id)
}
}
sort.Slice(incident.PreviousIds, func(i, j int) bool {
return incident.PreviousIds[i] > incident.PreviousIds[j]
})
_, err = daState.UpdateIncidentState(incident)
if err != nil {
return
}
if len(incident.PreviousIds) > 0 {
err = daState.SetIncidentNext(incident.PreviousIds[0], incident.Id)
if err != nil {
return
}
}
}
}
//render templates and open alert key if abnormal
if event.Status > models.StNormal {
rt = s.executeTemplates(incident, event, a, r)
incident.Open = true
if a.Log {
incident.Open = false
}
}
// On state increase, clear old notifications and notify current.
// Do nothing if state did not change.
notify := func(ns *conf.Notifications) {
if a.Log {
lastLogTime := s.lastLogTimes[ak]
now := utcNow()
if now.Before(lastLogTime.Add(a.MaxLogFrequency)) {
return
}
s.lastLogTimes[ak] = now
}
nots := ns.Get(s.RuleConf, incident.AlertKey.Group())
for _, n := range nots {
s.Notify(incident, rt, n)
checkNotify = true
}
}
notifyCurrent := func() {
//Auto close ignoreUnknowns for new incident.
if silencedOrIgnored(a, event, si) || si != nil && si.Forget {
incident.Open = false
//auto forget
if si != nil && si.Forget {
slog.Infof("Auto forget enabled for %s", ak)
err := s.ActionByAlertKey("bosun", "Auto forget was enabled", models.ActionForget, nil, ak)
if err != nil {
slog.Errorln(err)
}
}
return
}
incident.NeedAck = true
switch event.Status {
case models.StCritical, models.StUnknown:
notify(a.CritNotification)
case models.StWarning:
notify(a.WarnNotification)
}
}
// lock while we change notifications.
s.Lock("RunHistory")
if shouldNotify {
incident.NeedAck = false
if err = s.DataAccess.Notifications().ClearNotifications(ak); err != nil {
return
}
notifyCurrent()
}
autoClose := func(ak models.AlertKey, reason string, sendNotification bool) {
msg := fmt.Sprintf("auto close %s because was %s", ak, reason)
slog.Infof(msg)
err := s.ActionByAlertKey("bosun", msg, models.ActionClose, nil, ak)
if err != nil {
slog.Errorln(err)
} else if sendNotification {
aks := []models.AlertKey{ak}
s.ActionNotify(models.ActionClose, "bosun", reason, aks)
}
}
if event.Status == models.StNormal {
if si := silenced(ak); si != nil {
go autoClose(ak, "silenced", false)
} else if a.CloseOnNormal {
go autoClose(ak, "normal", true)
}
}
s.Unlock()
return checkNotify, nil
}
func silencedOrIgnored(a *conf.Alert, event *models.Event, si *models.Silence) bool |
func (s *Schedule) executeTemplates(st *models.IncidentState, event *models.Event, a *conf.Alert, r *RunHistory) *models.RenderedTemplates {
if event.Status == models.StUnknown {
return nil
}
rt, errs := s.ExecuteAll(r, a, st, true)
if len(errs) > 0 {
for _, err := range errs {
slog.Errorf("rendering templates for %s: %s", a.Name, err)
}
subject, body, err := s.ExecuteBadTemplate(errs, r, a, st)
if err != nil {
subject = fmt.Sprintf("unable to create template error notification: %v", err)
}
rt.Subject = subject
if body != "" {
rt.Body = body
}
}
st.Subject = rt.Subject
return rt
}
// CollectStates sends various state information to bosun with collect.
func (s *Schedule) CollectStates() {
// [AlertName][Severity]Count
severityCounts := make(map[string]map[string]int64)
abnormalCounts := make(map[string]map[string]int64)
ackStatusCounts := make(map[string]map[bool]int64)
ackByNotificationCounts := make(map[string]map[bool]int64)
unAckOldestByNotification := make(map[string]time.Time)
activeStatusCounts := make(map[string]map[bool]int64)
// Initialize the counts
for _, alert := range s.RuleConf.GetAlerts() {
severityCounts[alert.Name] = make(map[string]int64)
abnormalCounts[alert.Name] = make(map[string]int64)
var i models.Status
for i = 1; i.String() != "none"; i++ {
severityCounts[alert.Name][i.String()] = 0
abnormalCounts[alert.Name][i.String()] = 0
}
ackStatusCounts[alert.Name] = make(map[bool]int64)
activeStatusCounts[alert.Name] = make(map[bool]int64)
ackStatusCounts[alert.Name][false] = 0
activeStatusCounts[alert.Name][false] = 0
ackStatusCounts[alert.Name][true] = 0
activeStatusCounts[alert.Name][true] = 0
}
for notificationName := range s.RuleConf.GetNotifications() {
unAckOldestByNotification[notificationName] = time.Unix(1<<63-62135596801, 999999999)
ackByNotificationCounts[notificationName] = make(map[bool]int64)
ackByNotificationCounts[notificationName][false] = 0
ackByNotificationCounts[notificationName][true] = 0
}
//TODO:
// for _, state := range s.status {
// if !state.Open {
// continue
// }
// name := state.AlertKey.Name()
// alertDef := s.Conf.Alerts[name]
// nots := make(map[string]bool)
// for name := range alertDef.WarnNotification.Get(s.Conf, state.Group) {
// nots[name] = true
// }
// for name := range alertDef.CritNotification.Get(s.Conf, state.Group) {
// nots[name] = true
// }
// incident, err := s.GetIncident(state.Last().IncidentId)
// if err != nil {
// slog.Errorln(err)
// }
// for notificationName := range nots {
// ackByNotificationCounts[notificationName][state.NeedAck]++
// if incident != nil && incident.Start.Before(unAckOldestByNotification[notificationName]) && state.NeedAck {
// unAckOldestByNotification[notificationName] = incident.Start
// }
// }
// severity := state.CurrentStatus.String()
// lastAbnormal := state.LastAbnormalStatus.String()
// severityCounts[state.Alert][severity]++
// abnormalCounts[state.Alert][lastAbnormal]++
// ackStatusCounts[state.Alert][state.NeedAck]++
// activeStatusCounts[state.Alert][state.IsActive()]++
// }
for notification := range ackByNotificationCounts {
ts := opentsdb.TagSet{"notification": notification}
err := collect.Put("alerts.acknowledgement_status_by_notification",
ts.Copy().Merge(opentsdb.TagSet{"status": "unacknowledged"}),
ackByNotificationCounts[notification][true])
if err != nil {
slog.Errorln(err)
}
err = collect.Put("alerts.acknowledgement_status_by_notification",
ts.Copy().Merge(opentsdb.TagSet{"status": "acknowledged"}),
ackByNotificationCounts[notification][false])
if err != nil {
slog.Errorln(err)
}
}
for notification, timeStamp := range unAckOldestByNotification {
ts := opentsdb.TagSet{"notification": notification}
var ago time.Duration
if !timeStamp.Equal(time.Unix(1<<63-62135596801, 999999999)) {
ago = utcNow().Sub(timeStamp)
}
err := collect.Put("alerts.oldest_unacked_by_notification",
ts,
ago.Seconds())
if err != nil {
slog.Errorln(err)
}
}
for alertName := range severityCounts {
ts := opentsdb.TagSet{"alert": alertName}
// The tagset of the alert is not included because there is no way to
// store the string of a group in OpenTSDB in a parsable way. This is
// because any delimiter we chose could also be part of a tag key or tag
// value.
for severity := range severityCounts[alertName] {
err := collect.Put("alerts.current_severity",
ts.Copy().Merge(opentsdb.TagSet{"severity": severity}),
severityCounts[alertName][severity])
if err != nil {
slog.Errorln(err)
}
err = collect.Put("alerts.last_abnormal_severity",
ts.Copy().Merge(opentsdb.TagSet{"severity": severity}),
abnormalCounts[alertName][severity])
if err != nil {
slog.Errorln(err)
}
}
err := collect.Put("alerts.acknowledgement_status",
ts.Copy().Merge(opentsdb.TagSet{"status": "unacknowledged"}),
ackStatusCounts[alertName][true])
err = collect.Put("alerts.acknowledgement_status",
ts.Copy().Merge(opentsdb.TagSet{"status": "acknowledged"}),
ackStatusCounts[alertName][false])
if err != nil {
slog.Errorln(err)
}
err = collect.Put("alerts.active_status",
ts.Copy().Merge(opentsdb.TagSet{"status": "active"}),
activeStatusCounts[alertName][true])
if err != nil {
slog.Errorln(err)
}
err = collect.Put("alerts.active_status",
ts.Copy().Merge(opentsdb.TagSet{"status": "inactive"}),
activeStatusCounts[alertName][false])
if err != nil {
slog.Errorln(err)
}
}
}
func (s *Schedule) GetUnknownAndUnevaluatedAlertKeys(alert string) (unknown, uneval []models.AlertKey) {
unknown, uneval, err := s.DataAccess.State().GetUnknownAndUnevalAlertKeys(alert)
if err != nil {
slog.Errorf("Error getting unknown/unevaluated alert keys: %s", err)
return nil, nil
}
return unknown, uneval
}
var bosunStartupTime = utcNow()
func (s *Schedule) findUnknownAlerts(now time.Time, alert string) []models.AlertKey {
keys := []models.AlertKey{}
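// Suppress unknown detection during the first check frequency after startup.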
if utcNow().Sub(bosunStartupTime) < s.SystemConf.GetCheckFrequency() {
return keys
}
if !s.AlertSuccessful(alert) {
return keys
}
a := s.RuleConf.GetAlert(alert)
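// Default the unknown window to twice the alert's effective run interval when unset.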
t := a.Unknown
if t == 0 {
runEvery := s.SystemConf.GetDefaultRunEvery()
if a.RunEvery != 0 {
runEvery = a.RunEvery
}
t = s.SystemConf.GetCheckFrequency() * 2 * time.Duration(runEvery)
}
maxTouched := now.UTC().Unix() - int64(t.Seconds())
untouched, err := s.DataAccess.State().GetUntouchedSince(alert, maxTouched)
if err != nil {
slog.Errorf("Error finding unknown alerts for alert %s: %s.", alert, err)
return keys
}
for _, ak := range untouched {
if a.Squelch.Squelched(ak.Group()) {
continue
}
keys = append(keys, ak)
}
return keys
}
func (s *Schedule) CheckAlert(T miniprofiler.Timer, r *RunHistory, a *conf.Alert) (cancelled bool) {
slog.Infof("check alert %v start with now set to %v", a.Name, r.Start.Format("2006-01-02 15:04:05.999999999"))
start := utcNow()
unknowns := s.findUnknownAlerts(r.Start, a.Name)
for _, ak := range unknowns {
r.Events[ak] = &models.Event{Status: models.StUnknown}
}
var warns, crits models.AlertKeys
type res struct {
results *expr.Results
error error
}
// Buffered channel so the goroutine that runs executeExpr won't leak if the
// check is cancelled by the closing of the schedule.
rc := make(chan res, 1)
var d *expr.Results
var err error
go func() {
d, err := s.executeExpr(T, r, a, a.Depends)
rc <- res{d, err} // this would hang forever if the channel weren't buffered, since nothing will ever receive from rc after cancellation
}()
select {
case res := <-rc:
d = res.results
err = res.error
// If the schedule closes before the expression has finished executing, we abandon the
// execution of the expression
case <-s.runnerContext.Done():
return true
}
var deps expr.ResultSlice
if err == nil {
deps = filterDependencyResults(d)
crits, err, cancelled = s.CheckExpr(T, r, a, a.Crit, models.StCritical, nil)
if err == nil && !cancelled {
warns, err, cancelled = s.CheckExpr(T, r, a, a.Warn, models.StWarning, crits)
}
}
if cancelled {
return true
}
unevalCount, unknownCount := markDependenciesUnevaluated(r.Events, deps, a.Name)
errorsCount := 0
if err != nil {
slog.Errorf("Error checking alert %s: %s", a.Name, err.Error())
errorsCount = removeUnknownEvents(r.Events, a.Name)
s.markAlertError(a.Name, err)
} else {
s.markAlertSuccessful(a.Name)
}
untouched := make([]models.AlertKey, 0)
sm := 0
for ak, ev := range r.Events {
if ak.Name() != a.Name {
continue
}
sm++
if ev.Status == models.StUnknown {
untouched = append(untouched, ak)
}
}
// Normalize the counter so the log never reports a zero untouched_sum on an alert's first check.
if sm == 0 {
sm++
}
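// Count the failure even when no unknown events were removed.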
if errorsCount == 0 && err != nil {
errorsCount++
}
collect.Put("check.duration", opentsdb.TagSet{"name": a.Name}, time.Since(start).Seconds())
slog.Infof(
"check alert %v done (%s): %v untouched_sum, %v crits, %v warns, %v unevaluated, %v unknown, %v untouched/unknown, %v errors",
a.Name, time.Since(start), sm, len(crits), len(warns), unevalCount, unknownCount,
len(untouched), errorsCount,
)
if len(untouched) > 0 {
runEvery := s.SystemConf.GetDefaultRunEvery()
if a.RunEvery != 0 {
runEvery = a.RunEvery
}
t := s.SystemConf.GetCheckFrequency() * 2 * time.Duration(runEvery)
untouchedSecs := time.Now().UTC().Unix() - r.Start.UTC().Unix() - int64(t.Seconds())
slog.Infof("Keys untouched since %v (%v secs): %v", r.Start, untouchedSecs, untouched)
}
return false
}
func removeUnknownEvents(evs map[models.AlertKey]*models.Event, alert string) (count int) {
for k, v := range evs {
if v.Status == models.StUnknown && k.Name() == alert {
count++
delete(evs, k)
}
}
return
}
func filterDependencyResults(results *expr.Results) expr.ResultSlice {
// take the results of the dependency expression and filter it to
// non-zero tag sets.
filtered := expr.ResultSlice{}
if results == nil {
return filtered
}
for _, r := range results.Results {
var n float64
switch v := r.Value.(type) {
case expr.Number:
n = float64(v)
case expr.Scalar:
n = float64(v)
}
if !math.IsNaN(n) && n != 0 {
filtered = append(filtered, r)
}
}
return filtered
}
func markDependenciesUnevaluated(events map[models.AlertKey]*models.Event, deps expr.ResultSlice, alert string) (unevalCount, unknownCount int) {
for ak, ev := range events {
if ak.Name() != alert {
continue
}
for _, dep := range deps {
if len(dep.Group) == 0 || dep.Group.Overlaps(ak.Group()) {
ev.Unevaluated = true
unevalCount++
}
if ev.Status == models.StUnknown {
unknownCount++
}
}
}
return unevalCount, unknownCount
}
func (s *Schedule) executeExpr(T miniprofiler.Timer, rh *RunHistory, a *conf.Alert, e *expr.Expr) (*expr.Results, error) {
if e == nil {
return nil, nil
}
providers := &expr.BosunProviders{
Cache: rh.Cache,
Search: s.Search,
Squelched: s.RuleConf.AlertSquelched(a),
History: s,
Annotate: s.annotate,
}
origin := fmt.Sprintf("Schedule: Alert Name: %s", a.Name)
results, _, err := e.Execute(rh.Backends, providers, T, rh.Start, 0, a.UnjoinedOK, origin)
return results, err
}
func (s *Schedule) CheckExpr(T miniprofiler.Timer, rh *RunHistory, a *conf.Alert, e *expr.Expr, checkStatus models.Status, ignore models.AlertKeys) (alerts models.AlertKeys, err error, cancelled bool) {
if e == nil {
return
}
defer func() {
if err == nil {
return
}
collect.Add("check.errs", opentsdb.TagSet{"metric": a.Name}, 1)
slog.Errorln(err)
}()
type res struct {
results *expr.Results
error error
}
// See s.CheckAlert for an explanation of execution and cancellation with this channel
rc := make(chan res, 1)
var results *expr.Results
go func() {
results, err := s.executeExpr(T, rh, a, e)
rc <- res{results, err}
}()
select {
case res := <-rc:
results = res.results
err = res.error
case <-s.runnerContext.Done():
return nil, nil, true
}
if err != nil {
return
}
Loop:
for _, r := range results.Results {
if s.RuleConf.Squelched(a, r.Group) {
continue
}
ak := models.NewAlertKey(a.Name, r.Group)
for _, v := range ignore {
if ak == v {
continue Loop
}
}
var n float64
n, err = valueToFloat(r.Value)
if err != nil {
return
}
event := rh.Events[ak]
if event == nil {
event = new(models.Event)
rh.Events[ak] = event
}
result := &models.Result{
Computations: r.Computations,
Value: models.Float(n),
Expr: e.String(),
}
switch checkStatus {
case models.StWarning:
event.Warn = result
case models.StCritical:
event.Crit = result
}
status := checkStatus
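// A NaN result is treated as abnormal and keeps the check's status.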
if math.IsNaN(n) {
status = checkStatus
} else if n == 0 {
status = models.StNormal
}
if status != models.StNormal {
alerts = append(alerts, ak)
}
if status > rh.Events[ak].Status {
event.Status = status
}
}
return
}
func valueToFloat(val expr.Value) (float64, error) {
var n float64
switch v := val.(type) {
case expr.Number:
n = float64(v)
case expr.Scalar:
n = float64(v)
default:
return 0, fmt.Errorf("expected number or scalar")
}
return n, nil
}
| {
if a.IgnoreUnknown && event.Status == models.StUnknown {
return true
}
return false
} |
arrow.py | """Arrow plots for mechanism."""
import os
from src.plot_utils import ps_defaults
from src.constants import FIGURE_PATH
from typing import Optional
import matplotlib.pyplot as plt
def plot_arrow_plot(save_path: Optional[str] = None, show_plots: bool = False) -> None:
|
def plot_arrow_plot_6(
save_path: Optional[str] = None, show_plots: bool = False
) -> None:
"""
Plot the arrow plot to show how the mechanism performs in CMIP6.
Args:
save_path (Optional[str], optional): Where to save the plot to.
Defaults to None. If None will not save.
show_plots (bool, optional): Whether to show plots. Defaults to False.
"""
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
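"""Shade a +/- yerr error band around y at x, coloured by ensemble member mem."""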
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
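# head_length and decrease_arrow shorten each arrow shaft so the arrow tip
# lands on the target value rather than overshooting it.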
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
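# Nino3.4 trend values in K: the ECMWF/ORAS4 baseline, the CMIP6 MMM, and
# the mechanism contributions with their uncertainties.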
ecmwf = 0.411
# ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')
wind = 0.07
wind_error = 0.01
rh = 0.15
rh_error = 0.02
cmip6 = 0.772
rh_and_wind = 0.29
rh_and_wind_error = 0.04
ax.arrow(
1,
ecmwf,
0,
wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + wind, wind_error, "EECE")
ax.arrow(
2,
ecmwf,
0,
rh - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + rh, rh_error, "EEEC")
ax.arrow(
3,
ecmwf,
0,
rh_and_wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + rh_and_wind, rh_and_wind_error, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim,
[cmip6, cmip6],
color="red",
label="CMIP6 MMM $= 0.772$ K",
)
# plt.xticks([0, 1, 2, 3], ["ECMWF", "W", "RH", "RH+W"])
plt.xticks(
[1, 2, 3],
[
"W\n"
+ r"$+ $"
+ str(wind)
+ r" $\pm$ "
+ r"$"
+ str(wind_error)
+ r"$"
+ " K ",
"RH\n " + r"$+ $ $0.15$ $\pm$ $0.02$ K",
"RH+W\n " + r"$+ $ $0.29$ $\pm$ $0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf()
if __name__ == "__main__":
# python src/visualisation.arrow()
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.pdf"))
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.png"))
| """
Plot the arrow plot to show that I have reproduced the paper.
Args:
save_path (Optional[str], optional): Where to save the plot to.
Defaults to None. If None will not save.
show_plots (bool, optional): Whether to show plots. Defaults to False.
"""
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
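"""Shade a +/- yerr error band around y at x, coloured by ensemble member mem."""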
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
ecmwf = 0.411
# ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')
ax.arrow(
1,
ecmwf,
0,
0.054 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + 0.054, 0.005, "EECE")
ax.arrow(
2,
ecmwf,
0,
0.31 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + 0.31, 0.03, "EEEC")
ax.arrow(
3,
ecmwf,
0,
0.47 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + 0.47, 0.04, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim, [ecmwf + 0.478, ecmwf + 0.478], color="red", label="CMIP5 MMM $= 0.889$ K"
)
# plt.xticks([0, 1, 2, 3], ["ECMWF", "W", "RH", "RH+W"])
plt.xticks(
[1, 2, 3],
[
"W\n" + r"$+ 0.054 \pm 0.005$ K ",
"RH\n " + r"$+ 0.31 \pm 0.03$ K",
"RH+W\n " + r"$+ 0.47 \pm 0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf() |
actor.py | from leapp.actors import Actor
from leapp.libraries.actor.library import remove_boot_files
from leapp.models import BootContent
from leapp.tags import IPUWorkflowTag, PreparationPhaseTag
class RemoveBootFiles(Actor):
"""
Remove Leapp provided initramfs from boot partition.
Since Leapp provided initramfs and kernel are already loaded into RAM at this phase, remove
them to have as little space requirements for boot partition as possible.
"""
name = 'remove_boot_files'
consumes = (BootContent,)
produces = ()
tags = (IPUWorkflowTag, PreparationPhaseTag)
def process(self):
| remove_boot_files() |
|
visualization_utils.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions typically receive an image and perform some visualization on it.
Most do not return a value; instead they modify the image in place.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
# Set headless-friendly backend.
#import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_keypoint_scores=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4]: masks (optional)
[4-5]: keypoints (optional)
[4-6]: keypoint_scores (optional)
[4-7]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_keypoint_scores: Whether keypoint scores should be expected as a
positional argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be:
image - uint8 numpy array with shape (img_height, img_width, 3).
boxes - a numpy array of shape [N, 4].
classes - a numpy array of shape [N].
scores - a numpy array of shape [N] or None.
-- Optional positional arguments --
instance_masks - a numpy array of shape [N, image_height, image_width].
keypoints - a numpy array of shape [N, num_keypoints, 2].
keypoint_scores - a numpy array of shape [N, num_keypoints].
track_ids - a numpy array of shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = keypoint_scores = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoint_scores:
keypoint_scores = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def draw_heatmaps_on_image(image, heatmaps):
"""Draws heatmaps on an image.
The heatmaps are handled channel by channel and different colors are used to
paint different heatmap channels.
Args:
image: a PIL.Image object.
heatmaps: a numpy array with shape [image_height, image_width, channel].
Note that the image_height and image_width should match the size of input
image.
"""
draw = ImageDraw.Draw(image)
channel = heatmaps.shape[2]
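# Paint each heatmap channel with its own color from STANDARD_COLORS.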
for c in range(channel):
heatmap = heatmaps[:, :, c] * 255
heatmap = heatmap.astype('uint8')
bitmap = Image.fromarray(heatmap, 'L')
bitmap.convert('1')
draw.bitmap(
xy=[(0, 0)],
bitmap=bitmap,
fill=STANDARD_COLORS[c])
def draw_heatmaps_on_image_array(image, heatmaps):
"""Overlays heatmaps to an image (numpy array).
The function overlays the heatmaps on top of image. The heatmap values will be
painted with different colors depending on the channels. Similar to
"draw_heatmaps_on_image_array" function except the inputs are numpy arrays.
Args:
image: a numpy array with shape [height, width, 3].
heatmaps: a numpy array with shape [height, width, channel].
Returns:
An uint8 numpy array representing the input image painted with heatmap
colors.
"""
if not isinstance(image, np.ndarray):
image = image.numpy()
if not isinstance(heatmaps, np.ndarray):
heatmaps = heatmaps.numpy()
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_heatmaps_on_image(image_pil, heatmaps)
return np.array(image_pil)
def draw_heatmaps_on_image_tensors(images,
heatmaps,
apply_sigmoid=False):
"""Draws heatmaps on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
heatmaps: [N, h, w, channel] float32 tensor of heatmaps. Note that the
heatmaps will be resized to match the input image size before overlaying
the heatmaps with input images. Theoretically the heatmap height width
should have the same aspect ratio as the input image to avoid potential
misalignment introduced by the image resize.
apply_sigmoid: Whether to apply a sigmoid layer on top of the heatmaps. If
the heatmaps come directly from the prediction logits, then we should
apply the sigmoid layer to make sure the values are in between [0.0, 1.0].
Returns:
4D image tensor of type uint8, with heatmaps overlaid on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
_, height, width, _ = shape_utils.combined_static_and_dynamic_shape(images)
if apply_sigmoid:
heatmaps = tf.math.sigmoid(heatmaps)
resized_heatmaps = tf.image.resize(heatmaps, size=[height, width])
elems = [images, resized_heatmaps]
def draw_heatmaps(image_and_heatmaps):
"""Draws heatmaps on image."""
image_with_heatmaps = tf.py_function(
draw_heatmaps_on_image_array,
image_and_heatmaps,
tf.uint8)
return image_with_heatmaps
images = tf.map_fn(draw_heatmaps, elems, dtype=tf.uint8, back_prop=False)
return images
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_scores: A 3D float32 tensor of shape [N, max_detection,
num_keypoints] with keypoint scores.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: [N, max_detections] int32 tensor of unique tracks ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_keypoint_scores=keypoint_scores is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if keypoint_scores is not None:
elems.append(keypoint_scores)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
|
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if (key != input_data_fields.original_image and
key != input_data_fields.image_additional_channels):
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
keypoint_scores = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
if detection_fields.detection_keypoint_scores in eval_dict:
keypoint_scores = tf.expand_dims(
eval_dict[detection_fields.detection_keypoint_scores][indx], axis=0)
else:
keypoint_scores = tf.cast(keypoint_ops.set_keypoint_visibilities(
keypoints), dtype=tf.float32)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
groundtruth_keypoints = None
groundtruth_keypoint_scores = None
gt_kpt_vis_fld = input_data_fields.groundtruth_keypoint_visibilities
if input_data_fields.groundtruth_keypoints in eval_dict:
groundtruth_keypoints = tf.expand_dims(
eval_dict[input_data_fields.groundtruth_keypoints][indx], axis=0)
if gt_kpt_vis_fld in eval_dict:
groundtruth_keypoint_scores = tf.expand_dims(
tf.cast(eval_dict[gt_kpt_vis_fld][indx], dtype=tf.float32), axis=0)
else:
groundtruth_keypoint_scores = tf.cast(
keypoint_ops.set_keypoint_visibilities(
groundtruth_keypoints), dtype=tf.float32)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=groundtruth_keypoints,
keypoint_scores=groundtruth_keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat([images_with_detections,
images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
keypoint_scores: a numpy array with shape [num_keypoints]. If provided, only
those keypoints with a score above score_threshold will be visualized.
min_score_thresh: A scalar indicating the minimum keypoint score required
for a keypoint to be visualized. Note that keypoint_scores must be
provided for this threshold to take effect.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil,
keypoints,
keypoint_scores=keypoint_scores,
min_score_thresh=min_score_thresh,
color=color,
radius=radius,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=keypoint_edge_color,
keypoint_edge_width=keypoint_edge_width)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
keypoint_scores: a numpy array with shape [num_keypoints].
min_score_thresh: a score threshold for visualizing keypoints. Only used if
keypoint_scores is provided.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints = np.array(keypoints)
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
if keypoint_scores is not None:
keypoint_scores = np.array(keypoint_scores)
valid_kpt = np.greater(keypoint_scores, min_score_thresh)
else:
valid_kpt = np.where(np.any(np.isnan(keypoints), axis=1),
np.zeros_like(keypoints[:, 0]),
np.ones_like(keypoints[:, 0]))
valid_kpt = [v for v in valid_kpt]
for keypoint_x, keypoint_y, valid in zip(keypoints_x, keypoints_y, valid_kpt):
if valid:
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
if not (valid_kpt[keypoint_start] and valid_kpt[keypoint_end]):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
mask: a uint8 numpy array of shape (img_height, img_width) with
values of either 0 or 1.
color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None.
keypoint_scores: a numpy array of shape [N, num_keypoints], can be None.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box or keypoint to be
visualized.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_keypoint_scores_map = collections.defaultdict(list)
box_to_track_ids_map = {}
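# A falsy max_boxes_to_draw (None or 0) means draw every box.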
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if keypoint_scores is not None:
box_to_keypoint_scores_map[box].extend(keypoint_scores[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(round(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, round(100*scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
prime_multiplier = _get_multiplier_for_color_randomness()
box_to_color_map[box] = STANDARD_COLORS[
(prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
keypoint_scores_for_box = None
if box_to_keypoint_scores_map:
keypoint_scores_for_box = box_to_keypoint_scores_map[box]
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
keypoint_scores_for_box,
min_score_thresh=min_score_thresh,
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
for a batched example. Note that, we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.InputDataFields.groundtruth_keypoints - (optional)
[batch_size, num_boxes, num_keypoints, 2] float32 tensor with
keypoint coordinates in format [y, x].
fields.InputDataFields.groundtruth_keypoint_visibilities - (optional)
[batch_size, num_boxes, num_keypoints] bool tensor with
keypoint visibilities.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
fields.DetectionResultFields.detection_keypoint_scores - (optional)
[batch_size, max_num_boxes, num_keypoints] float32 tensor with
keypoint scores.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4),
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]])
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(
get_images, [], [tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
| """Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes |
columnreader.py | # Copyright 2018 Delft University of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package contains a python object representation for ColumnReaders, and
the functions needed to generate a ColumnReader from an Arrow field
(represented as the objects in fields.py)."""
from itertools import zip_longest
import random
from .configurable import *
from .fields import *
from .streams import *
from .lines import *
from .testbench import *
__all__ = ["ColumnReader", "BUS_ADDR_WIDTH", "INDEX_WIDTH", "CMD_TAG_WIDTH"]
# Define the generics used by ColumnReaders.
BUS_ADDR_WIDTH = Generic("BUS_ADDR_WIDTH")
BUS_LEN_WIDTH = Generic("BUS_LEN_WIDTH")
BUS_DATA_WIDTH = Generic("BUS_DATA_WIDTH")
INDEX_WIDTH = Generic("INDEX_WIDTH")
CMD_TAG_WIDTH = Generic("CMD_TAG_WIDTH")
class ReaderLevel(Configurable):
"""Represents an abstract ColumnReaderLevel(Level)."""
def __init__(self, **config):
super().__init__(**config)
@property
def _cmdname(self):
"""Returns the cfg string command name for this ReaderLevel."""
return "?"
@property
def _children(self):
"""Returns a list of child ReaderLevels or parameters."""
return []
def bus_count(self):
"""Returns the number of busses used by this ReaderLevel."""
raise NotImplementedError()
@classmethod | addr = memory.byte_addr()
for entry in data:
memory.write(entry, bits)
return addr
def test_vectors(self, memory, row_count, commands):
"""Returns a set of test vectors for all the signals defined by this
ReaderLevel (both command and response), given a row count and a list
of commands (represented as a list of two-tuples, where the first entry
is the inclusive start index and the second is the exclusive stop
index). The Memory object that should be passed to memory is updated
accordingly, with new data sets generated at the current memory
pointer."""
raise NotImplementedError()
def __str__(self):
"""Returns the cfg string for this ReaderLevel."""
children = ",".join(map(str, self._children))
attrs = ",".join(map(lambda x: "%s=%s" % x, self._config.items()))
attrs = []
for key, value in self._config.items():
if isinstance(value, int) or isinstance(value, bool):
value = str(int(value))
attrs.append("%s=%s" % (key, value))
attrs = ",".join(attrs)
if attrs:
attrs = ";" + attrs
return "%s(%s%s)" % (self._cmdname, children, attrs)
class PrimReaderLevel(ReaderLevel):
"""A reader for a primitive data type."""
def __init__(
self,
bit_width,
cmd_stream,
cmd_val_base,
out_stream,
out_val,
**kwargs
):
super().__init__(**kwargs)
# Check and save the bit width.
if not bit_width or bit_width & (bit_width-1):
raise ValueError("bit width must be a power of two")
self.bit_width = bit_width
self.cmd_stream = cmd_stream
self.cmd_val_base = cmd_val_base
self.out_stream = out_stream
self.out_val = out_val
@property
def _cmdname(self):
"""Returns the cfg string command name for this ReaderLevel."""
return "prim"
@property
def _children(self):
"""Returns a list of child ReaderLevels or parameters."""
return [self.bit_width]
@property
def _config_defaults(self):
return { # NOTE: the defaults here MUST correspond to VHDL defaults.
"cmd_in_slice": False,
"bus_req_slice": True,
"bus_fifo_depth": 16,
"bus_fifo_ram_config": "",
"unlock_slice": True,
"shr2gb_slice": False,
"gb2fifo_slice": False,
"fifo_size": 64,
"fifo_ram_config": "",
"fifo_xclk_stages": 0,
"fifo2post_slice": False,
"out_slice": True
}
def bus_count(self):
"""Returns the number of busses used by this ReaderLevel."""
return 2
def test_vectors(self, memory, row_count, commands):
"""Returns a set of test vectors for all the signals defined by this
ReaderLevel (both command and response), given a row count and a list
of commands (represented as a list of two-tuples, where the first entry
is the inclusive start index and the second is the exclusive stop
index). The Memory object that should be passed to memory is updated
accordingly, with new data sets generated at the current memory
pointer."""
# Generate memory for 4 buffers of the given row count. We randomly
# select which buffer to use for each command.
buffers = []
for _ in range(4):
data = [random.randrange(1 << self.bit_width) for _ in range(row_count)]
addr = self._write_buffer(memory, self.bit_width, data)
buffers.append((addr, data))
# Generate test vectors for our signals.
base_tv = TestVectors(self.cmd_val_base)
val_tv = TestVectors(self.out_val, self.out_stream.name + "dvalid = '1'")
for start, stop in commands:
buf_idx = random.randrange(4)
addr, data = buffers[buf_idx]
base_tv.append(addr)
val_tv.extend(data[start:stop])
return [base_tv, val_tv]
class ArbReaderLevel(ReaderLevel):
"""A wrapper for readers that instantiates a bus arbiter and optionally
slices for all the other streams."""
def __init__(self, child, **kwargs):
super().__init__(**kwargs)
self.child = child
@property
def _cmdname(self):
"""Returns the cfg string command name for this ReaderLevel."""
return "arb"
@property
def _children(self):
"""Returns a list of child ReaderLevels or parameters."""
return [self.child]
@property
def _config_defaults(self):
return { # NOTE: the defaults here MUST correspond to VHDL defaults.
"method": "ROUND-ROBIN",
"max_outstanding": 2,
"ram_config": "",
"req_in_slices": False,
"req_out_slice": True,
"resp_in_slice": False,
"resp_out_slices": True,
"cmd_stream_slice": True,
"unlock_stream_slice": True,
"out_stream_slice": True
}
def bus_count(self):
"""Returns the number of busses used by this ReaderLevel."""
return 1
def test_vectors(self, memory, row_count, commands):
"""Returns a set of test vectors for all the signals defined by this
ReaderLevel (both command and response), given a row count and a list
of commands (represented as a list of two-tuples, where the first entry
is the inclusive start index and the second is the exclusive stop
index). The Memory object that should be passed to memory is updated
accordingly, with new data sets generated at the current memory
pointer."""
return self.child.test_vectors(memory, row_count, commands)
class NullReaderLevel(ReaderLevel):
"""A reader for a null bitmap."""
def __init__(
self,
child,
cmd_stream,
cmd_no_nulls,
cmd_null_base,
out_stream,
out_not_null,
**kwargs
):
super().__init__(**kwargs)
self.child = child
self.cmd_stream = cmd_stream
self.cmd_no_nulls = cmd_no_nulls
self.cmd_null_base = cmd_null_base
self.out_stream = out_stream
self.out_not_null = out_not_null
@property
def _cmdname(self):
"""Returns the cfg string command name for this ReaderLevel."""
return "null"
@property
def _children(self):
"""Returns a list of child ReaderLevels or parameters."""
return [self.child]
@property
def _config_defaults(self):
return { # NOTE: the defaults here MUST correspond to VHDL defaults.
"cmd_in_slice": False,
"bus_req_slice": True,
"bus_fifo_depth": 16,
"bus_fifo_ram_config": "",
"unlock_slice": True,
"shr2gb_slice": False,
"gb2fifo_slice": False,
"fifo_size": 64,
"fifo_ram_config": "",
"fifo_xclk_stages": 0,
"fifo2post_slice": False,
"out_slice": True
}
def bus_count(self):
"""Returns the number of busses used by this ReaderLevel."""
return self.child.bus_count() + 1
def test_vectors(self, memory, row_count, commands):
"""Returns a set of test vectors for all the signals defined by this
ReaderLevel (both command and response), given a row count and a list
of commands (represented as a list of two-tuples, where the first entry
is the inclusive start index and the second is the exclusive stop
index). The Memory object that should be passed to memory is updated
accordingly, with new data sets generated at the current memory
pointer."""
# Generate memory for 3 buffers of the given row count. We randomly
# select between one of the buffers and an implicit null bitmap for
# each command.
buffers = []
for _ in range(3):
data = [min(1, random.randrange(10)) for _ in range(row_count)]
addr = self._write_buffer(memory, 1, data)
buffers.append((addr, data))
# Generate test vectors for our signals.
impl_tv = TestVectors(self.cmd_no_nulls)
base_tv = TestVectors(self.cmd_null_base)
val_tv = TestVectors(self.out_not_null, self.out_stream.name + "dvalid = '1'")
for start, stop in commands:
buf_idx = random.randrange(4)
if buf_idx < 3:
addr, data = buffers[buf_idx]
impl_tv.append(0)
base_tv.append(addr)
val_tv.extend(data[start:stop])
else:
impl_tv.append(1)
base_tv.append(None)
val_tv.extend([1 for _ in range(start, stop)])
return [impl_tv, base_tv, val_tv] + self.child.test_vectors(memory, row_count, commands)
def _list_test_vectors(reader, memory, row_count, commands):
"""Test vector generation function shared by ListReaderLevel and
ListPrimReaderLevel."""
# Generate on average 4 items per list.
child_length = row_count * 4
child_commands = []
child_idxs = []
# Generate memory for 4 buffers of the given row count. We randomly
# select one of the buffers for each command.
buffers = []
for _ in range(4):
data = [random.randint(0, child_length) for _ in range(row_count-1)]
data = [0] + sorted(data) + [child_length]
addr = reader._write_buffer(memory, 32, data) # FIXME: this width is actually a generic!
buffers.append((addr, data))
# Generate test vectors for our signals and figure out the command
# stream for the child.
base_tv = TestVectors(reader.cmd_idx_base)
len_tv = TestVectors(reader.out_length, reader.out_stream.name + "dvalid = '1'")
for start, stop in commands:
buf_idx = random.randrange(4)
addr, data = buffers[buf_idx]
child_commands.append((data[start], data[stop]))
child_idxs.append(list(zip(data[start:stop], data[start+1:stop+1])))
base_tv.append(addr)
len_tv.extend([data[i+1] - data[i] for i in range(start, stop)])
return child_length, child_commands, child_idxs, [base_tv, len_tv]
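# For example, with offset buffer [0, 2, 5, 8] (row_count 3, child_length 8),
# the command (0, 2) produces list lengths 2 and 3 and the child command
# (0, 5).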
class ListReaderLevel(ReaderLevel):
"""A reader for a list index buffer."""
def __init__(
self,
child,
cmd_stream,
cmd_idx_base,
out_stream,
out_length,
out_el_stream,
**kwargs
):
super().__init__(**kwargs)
self.child = child
self.cmd_stream = cmd_stream
self.cmd_idx_base = cmd_idx_base
self.out_stream = out_stream
self.out_length = out_length
self.out_el_stream = out_el_stream
@property
def _cmdname(self):
"""Returns the cfg string command name for this ReaderLevel."""
return "list"
@property
def _children(self):
"""Returns a list of child ReaderLevels or parameters."""
return [self.child]
@property
def _config_defaults(self):
return { # NOTE: the defaults here MUST correspond to VHDL defaults.
"cmd_in_slice": False,
"bus_req_slice": True,
"bus_fifo_depth": 16,
"bus_fifo_ram_config": "",
"cmd_out_slice": True,
"unlock_slice": True,
"shr2gb_slice": False,
"gb2fifo_slice": False,
"fifo_size": 64,
"fifo_ram_config": "",
"fifo_xclk_stages": 0,
"fifo2post_slice": False,
"len_out_slice": True,
"len_sync_slice": True,
"data_in_slice": False,
"data_out_slice": True
}
def bus_count(self):
"""Returns the number of busses used by this ReaderLevel."""
return self.child.bus_count() + 1
def test_vectors(self, memory, row_count, commands):
"""Returns a set of test vectors for all the signals defined by this
ReaderLevel (both command and response), given a row count and a list
of commands (represented as a list of two-tuples, where the first entry
is the inclusive start index and the second is the exclusive stop
index). The Memory object that should be passed to memory is updated
accordingly, with new data sets generated at the current memory
pointer."""
# Figure out the test vectors for the list.
child_length, child_commands, child_idxs, tvs = _list_test_vectors(
self, memory, row_count, commands)
# Figure out the test vectors for the child.
tvs.extend(self.child.test_vectors(memory, child_length, child_commands))
# Figure out the last/dvalid signals for the element stream.
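# Note that an empty list still occupies one transfer on the element stream,
# with dvalid low and last high.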
last_tv = TestVectors(self.out_el_stream.signals[0])
dvalid_tv = TestVectors(self.out_el_stream.signals[1])
for idxs in child_idxs:
for start, stop in idxs:
l = stop - start
if not l:
last_tv.append(1)
dvalid_tv.append(0)
else:
for i in range(l):
last_tv.append(int(i == l - 1))
dvalid_tv.append(1)
return tvs + [last_tv, dvalid_tv]
class ListPrimReaderLevel(ReaderLevel):
"""A reader for a list of non-nullable primitive data types."""
def __init__(
self,
bit_width,
cmd_stream,
cmd_idx_base,
cmd_val_base,
out_stream,
out_length,
out_el_stream,
out_el_values,
out_el_count,
**kwargs
):
super().__init__(**kwargs)
# Check and save the bit width.
if not bit_width or bit_width & (bit_width-1):
raise ValueError("bit width must be a power of two")
self.bit_width = bit_width
self.cmd_stream = cmd_stream
self.cmd_idx_base = cmd_idx_base
self.cmd_val_base = cmd_val_base
self.out_stream = out_stream
self.out_length = out_length
self.out_el_stream = out_el_stream
self.out_el_values = out_el_values
self.out_el_count = out_el_count
@property
def _cmdname(self):
"""Returns the cfg string command name for this ReaderLevel."""
return "listprim"
@property
def _children(self):
"""Returns a list of child ReaderLevels or parameters."""
return [self.bit_width]
@property
def _config_defaults(self):
return { # NOTE: the defaults here MUST correspond to VHDL defaults.
"epc": 1,
"idx_cmd_in_slice": False,
"idx_bus_req_slice": True,
"idx_bus_fifo_depth": 16,
"idx_bus_fifo_ram_config": "",
"idx_cmd_out_slice": True,
"idx_unlock_slice": True,
"idx_shr2gb_slice": False,
"idx_gb2fifo_slice": False,
"idx_fifo_size": 64,
"idx_fifo_ram_config": "",
"idx_fifo_xclk_stages": 0,
"idx_fifo2post_slice": False,
"cmd_in_slice": False,
"bus_req_slice": True,
"bus_fifo_depth": 16,
"bus_fifo_ram_config": "",
"unlock_slice": True,
"shr2gb_slice": False,
"gb2fifo_slice": False,
"fifo_size": 64,
"fifo_ram_config": "",
"fifo_xclk_stages": 0,
"fifo2post_slice": False,
"out_slice": False,
"len_out_slice": True,
"data_in_slice": False,
"len_sync_slice": True,
"data_out_slice": True
}
def bus_count(self):
"""Returns the number of busses used by this ReaderLevel."""
return 2
def test_vectors(self, memory, row_count, commands):
"""Returns a set of test vectors for all the signals defined by this
ReaderLevel (both command and response), given a row count and a list
of commands (represented as a list of two-tuples, where the first entry
is the inclusive start index and the second is the exclusive stop
index). The Memory object that should be passed to memory is updated
accordingly, with new data sets generated at the current memory
pointer."""
# Figure out the test vectors for the list.
child_length, child_commands, child_idxs, tvs = _list_test_vectors(
self, memory, row_count, commands)
# Generate memory for 4 buffers of the given child length. We randomly
# select which buffer to use for each command.
buffers = []
for _ in range(4):
data = [random.randrange(1 << self.bit_width) for _ in range(child_length)]
addr = self._write_buffer(memory, self.bit_width, data)
buffers.append((addr, data))
# Generate test vectors for our signals.
base_tv = TestVectors(self.cmd_val_base)
val_tvs = [TestVectors(sig) for sig in self.out_el_values]
cnt_tv = TestVectors(self.out_el_count)
last_tv = TestVectors(self.out_el_stream.signals[0])
dvalid_tv = TestVectors(self.out_el_stream.signals[1])
for idxs in child_idxs:
buf_idx = random.randrange(4)
addr, cmd_data = buffers[buf_idx]
base_tv.append(addr)
for start, stop in idxs:
data = cmd_data[start:stop]
while True:
cnt = 0
for val_tv in val_tvs:
if data:
val_tv.append(data.pop(0))
cnt += 1
else:
val_tv.append()
cnt_tv.append(cnt)
dvalid_tv.append(1 if cnt > 0 else 0)
if not data:
last_tv.append(1)
break
else:
last_tv.append(0)
return tvs + val_tvs + [base_tv, cnt_tv, last_tv, dvalid_tv]
class StructReaderLevel(ReaderLevel):
"""A reader for a struct of TWO child readers."""
def __init__(self, a, b, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
@property
def _cmdname(self):
"""Returns the cfg string command name for this ReaderLevel."""
return "struct"
@property
def _children(self):
"""Returns a list of child ReaderLevels or parameters."""
return [self.a, self.b]
@property
def _config_defaults(self):
return { # NOTE: the defaults here MUST correspond to VHDL defaults.
}
def bus_count(self):
"""Returns the number of busses used by this ReaderLevel."""
return self.a.bus_count() + self.b.bus_count()
def test_vectors(self, memory, row_count, commands):
"""Returns a set of test vectors for all the signals defined by this
ReaderLevel (both command and response), given a row count and a list
of commands (represented as a list of two-tuples, where the first entry
is the inclusive start index and the second is the exclusive stop
index). The Memory object that should be passed to memory is updated
accordingly, with new data sets generated at the current memory
pointer."""
return (
self.a.test_vectors(memory, row_count, commands)
+ self.b.test_vectors(memory, row_count, commands)
)
def _new_cmd_stream(prefix, field_prefix=""):
"""Constructs a command stream. Returns the stream and the ctrl
SignalGroup."""
p = prefix + "cmd_" + field_prefix
s = Stream(p)
s.append(Signal(p + "firstIdx", INDEX_WIDTH))
s.append(Signal(p + "lastIdx", INDEX_WIDTH))
ctrl = s.append(SignalGroup(p + "ctrl"))
s.append(Signal(p + "tag", CMD_TAG_WIDTH))
return s, ctrl
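# For example, _new_cmd_stream("demo_", "x_") yields a stream whose signals
# are named "demo_cmd_x_firstIdx", "demo_cmd_x_lastIdx", "demo_cmd_x_ctrl"
# and "demo_cmd_x_tag".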
def _new_out_stream(prefix, field_prefix=""):
"""Constructs an output stream. Returns the stream and the data
SignalGroup."""
p = prefix + "out_" + field_prefix
s = Stream(p)
s.append(Signal(p + "last"))
s.append(Signal(p + "dvalid"))
data = s.append(SignalGroup(p + "data"))
return s, data
def _maybe_wrap_in_arbiter(reader, **opts):
"""Wraps the given reader in a ArbReaderLevel if deemed necessary."""
# TODO: make this stuff customizable using **opts.
if reader.bus_count() > 3:
reader = ArbReaderLevel(reader)
return reader
def _scalar_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
"""Internal function which converts a scalar field into a ReaderLevel."""
# Add the signals to the streams.
cmd_val_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "valBase", BUS_ADDR_WIDTH))
out_val = out_data.append(Signal(prefix + "out_" + field_prefix + "val", field.bit_width))
# Construct the primitive reader.
reader = PrimReaderLevel(
field.bit_width,
cmd_stream,
cmd_val_base,
out_stream,
out_val,
**field.get_cfg_dict({
"cmd_in_slice": "cmd_in_slice",
"bus_req_slice": "bus_req_slice",
"bus_fifo_depth": "bus_fifo_depth",
"bus_fifo_ram_config": "bus_fifo_ram_config",
"unlock_slice": "unlock_slice",
"shr2gb_slice": "shr2gb_slice",
"gb2fifo_slice": "gb2fifo_slice",
"fifo_size": "fifo_size",
"fifo_ram_config": "fifo_ram_config",
"fifo_xclk_stages": "fifo_xclk_stages",
"fifo2post_slice": "fifo2post_slice",
"out_slice": "out_slice"
})
)
return reader, []
def _bytes_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
"""Internal function which converts a UTF8/bytes field into a ReaderLevel."""
# Add the signals to the existing streams.
cmd_val_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "valBase", BUS_ADDR_WIDTH))
cmd_idx_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "idxBase", BUS_ADDR_WIDTH))
out_length = out_data.append(Signal(prefix + "out_" + field_prefix + "len", INDEX_WIDTH))
# Create a secondary output stream for the list elements.
out_el_stream, out_el_data = _new_out_stream(prefix, field_prefix + "el_")
# Populate the secondary output stream.
epc = field.bytes_per_cycle
out_el_count = out_el_data.append(Signal(prefix + "out_" + field_prefix + "el_cnt", int.bit_length(epc)))
out_el_values = [
Signal(prefix + "out_" + field_prefix + "el_val" + str(i), field.bit_width)
for i in range(epc)
]
# The elements are serialized MSB first!
for sig in reversed(out_el_values):
out_el_data.append(sig)
# Construct the primitive reader.
reader = ListPrimReaderLevel(
field.bit_width,
cmd_stream,
cmd_idx_base,
cmd_val_base,
out_stream,
out_length,
out_el_stream,
out_el_values,
out_el_count,
**field.get_cfg_dict({
"bytes_per_cycle": "epc",
"idx_cmd_in_slice": "idx_cmd_in_slice",
"idx_bus_req_slice": "idx_bus_req_slice",
"idx_bus_fifo_depth": "idx_bus_fifo_depth",
"idx_bus_fifo_ram_config": "idx_bus_fifo_ram_config",
"idx_cmd_out_slice": "idx_cmd_out_slice",
"idx_unlock_slice": "idx_unlock_slice",
"idx_shr2gb_slice": "idx_shr2gb_slice",
"idx_gb2fifo_slice": "idx_gb2fifo_slice",
"idx_fifo_size": "idx_fifo_size",
"idx_fifo_ram_config": "idx_fifo_ram_config",
"idx_fifo_xclk_stages": "idx_fifo_xclk_stages",
"idx_fifo2post_slice": "idx_fifo2post_slice",
"cmd_in_slice": "cmd_in_slice",
"bus_req_slice": "bus_req_slice",
"bus_fifo_depth": "bus_fifo_depth",
"bus_fifo_ram_config": "bus_fifo_ram_config",
"unlock_slice": "unlock_slice",
"shr2gb_slice": "shr2gb_slice",
"gb2fifo_slice": "gb2fifo_slice",
"fifo_size": "fifo_size",
"fifo_ram_config": "fifo_ram_config",
"fifo_xclk_stages": "fifo_xclk_stages",
"fifo2post_slice": "fifo2post_slice",
"out_slice": "out_slice",
"len_out_slice": "len_out_slice",
"data_in_slice": "data_in_slice",
"len_sync_slice": "len_sync_slice",
"data_out_slice": "data_out_slice"
})
)
return reader, [out_el_stream]
def _list_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
"""Internal function which converts a list field into a ReaderLevel."""
# Add the signals to the existing streams.
out_length = out_data.append(Signal(prefix + "out_" + field_prefix + "len", INDEX_WIDTH))
# Create a secondary output stream for the list elements.
out_el_stream, out_el_data = _new_out_stream(prefix, field_prefix + field.child.name + "_")
# Populate the secondary output stream with the child reader.
reader, secondary_out_streams = _field_reader(
field.child,
prefix, field_prefix,
cmd_stream, cmd_ctrl,
out_el_stream, out_el_data,
**opts)
# Command stream signal must be appended after traversing into the
# hierarchy.
cmd_idx_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "idxBase", BUS_ADDR_WIDTH))
# Construct the primitive reader.
reader = ListReaderLevel(
reader,
cmd_stream,
cmd_idx_base,
out_stream,
out_length,
out_el_stream,
**field.get_cfg_dict({
"cmd_in_slice": "cmd_in_slice",
"bus_req_slice": "bus_req_slice",
"bus_fifo_depth": "bus_fifo_depth",
"bus_fifo_ram_config": "bus_fifo_ram_config",
"cmd_out_slice": "cmd_out_slice",
"unlock_slice": "unlock_slice",
"shr2gb_slice": "shr2gb_slice",
"gb2fifo_slice": "gb2fifo_slice",
"fifo_size": "fifo_size",
"fifo_ram_config": "fifo_ram_config",
"fifo_xclk_stages": "fifo_xclk_stages",
"fifo2post_slice": "fifo2post_slice",
"len_out_slice": "len_out_slice",
"len_sync_slice": "len_sync_slice",
"data_in_slice": "data_in_slice",
"data_out_slice": "data_out_slice"
})
)
return reader, [out_el_stream] + secondary_out_streams
def _struct_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
"""Internal function which converts a struct field into a ReaderLevel."""
# Construct the child Reader objects.
child_readers = []
secondary_out_streams = []
for child in field.iter_children():
child_reader, child_secondary_out_stream = _field_reader(
child,
prefix, field_prefix,
cmd_stream, cmd_ctrl,
out_stream, out_data,
**opts)
child_readers.append(child_reader)
secondary_out_streams.extend(child_secondary_out_stream)
# Create a binary tree of readers.
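# For example, child readers [a, b, c] pair into [struct(a, b), c] on the
# first pass and reduce to struct(struct(a, b), c) on the second; each new
# struct may additionally be wrapped in an arbiter.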
while True:
# Stop if there's only one reader left.
if len(child_readers) == 1:
reader = child_readers[0]
break
# Add a level of structs.
it = iter(child_readers)
child_readers = []
for a, b in zip_longest(*[it]*2, fillvalue=None):
if b is None:
# Odd amount of child readers at this level of the binary tree;
# add the last reader without an additional struct level.
child_readers.append(a)
else:
struct = StructReaderLevel(a, b)
struct = _maybe_wrap_in_arbiter(struct, **opts)
child_readers.append(struct)
return reader, secondary_out_streams
def _field_reader(field, prefix, field_prefix, cmd_stream, cmd_ctrl, out_stream, out_data, **opts):
"""Internal function which converts a field into a ReaderLevel. This is
appropriately called by the initializer of Reader()."""
if not isinstance(field, Field):
raise TypeError("field must be of type %s" % Field)
if field.is_null():
raise ValueError("cannot make a reader for a null field")
# Update the field prefix.
if field_prefix is None:
field_prefix = ""
else:
field_prefix += field.name + "_"
# Add the signals for the null reader if this field is nullable. This must
# be done before going down the hierarchy.
if field.nullable:
out_not_null = out_data.append(Signal(prefix + "out_" + field_prefix + "notNull"))
# Defer to the field-specific generators.
for typ, gen in [
(ScalarField, _scalar_reader),
(BytesField, _bytes_reader),
(ListField, _list_reader),
(StructField, _struct_reader)
]:
if isinstance(field, typ):
reader, secondary_out_streams = gen(
field,
prefix, field_prefix,
cmd_stream, cmd_ctrl,
out_stream, out_data,
**opts)
break
else:
raise NotImplemented("No code generator is implemented for Field type %s" % type(field))
# Command stream signals must be appended after traversing into the
# hierarchy.
if field.nullable:
cmd_no_nulls = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "noNulls"))
cmd_null_base = cmd_ctrl.append(Signal(prefix + "cmd_" + field_prefix + "nullBase", BUS_ADDR_WIDTH))
# Generate the null() level if this field is nullable.
if field.nullable:
reader = NullReaderLevel(
reader,
cmd_stream,
cmd_no_nulls,
cmd_null_base,
out_stream,
out_not_null,
**field.get_cfg_dict({
"null_cmd_in_slice": "cmd_in_slice",
"null_bus_req_slice": "bus_req_slice",
"null_bus_fifo_depth": "bus_fifo_depth",
"null_bus_fifo_ram_config": "bus_fifo_ram_config",
"null_unlock_slice": "unlock_slice",
"null_shr2gb_slice": "shr2gb_slice",
"null_gb2fifo_slice": "gb2fifo_slice",
"null_fifo_size": "fifo_size",
"null_fifo_ram_config": "fifo_ram_config",
"null_fifo_xclk_stages": "fifo_xclk_stages",
"null_fifo2post_slice": "fifo2post_slice",
"null_out_slice": "out_slice"
})
)
# Wrap the field in an arbiter based on the arbiter policy.
reader = _maybe_wrap_in_arbiter(reader, **opts)
return reader, secondary_out_streams
wrapper_body_template = """
-- Copyright (C) Delft University of Technology - All Rights Reserved
-- (until further notice)
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;
library work;
use work.Streams.all;
use work.Utils.all;
use work.ColumnConfig.all;
use work.ColumnConfigParse.all;
use work.Columns.all;
entity {camelprefix}ColumnReader is
generic (
---------------------------------------------------------------------------
-- Bus metrics and configuration
---------------------------------------------------------------------------
-- Bus address width.
BUS_ADDR_WIDTH : natural := 32;
-- Bus burst length width.
BUS_LEN_WIDTH : natural := 8;
-- Bus data width.
BUS_DATA_WIDTH : natural := 32;
-- Number of beats in a burst step.
BUS_BURST_STEP_LEN : natural := 4;
-- Maximum number of beats in a burst.
BUS_BURST_MAX_LEN : natural := 16;
---------------------------------------------------------------------------
-- Arrow metrics and configuration
---------------------------------------------------------------------------
-- Index field width.
INDEX_WIDTH : natural := 32;
---------------------------------------------------------------------------
-- Column metrics and configuration
---------------------------------------------------------------------------
-- Enables or disables command stream tag system. When enabled, an
-- additional output stream is created that returns tags supplied along
-- with the command stream when all BufferReaders finish making bus
-- requests for the command. This can be used to support chunking later.
CMD_TAG_ENABLE : boolean := false;
-- Command stream tag width. Must be at least 1 to avoid null vectors.
CMD_TAG_WIDTH : natural := 1
);
port (
---------------------------------------------------------------------------
-- Clock domains
---------------------------------------------------------------------------
-- Rising-edge sensitive clock and active-high synchronous reset for the
-- bus and control logic side of the BufferReader.
bus_clk : in std_logic;
bus_reset : in std_logic;
-- Rising-edge sensitive clock and active-high synchronous reset for the
-- accelerator side.
acc_clk : in std_logic;
acc_reset : in std_logic;
---------------------------------------------------------------------------
-- Command streams
---------------------------------------------------------------------------
-- Command stream input (bus clock domain). firstIdx and lastIdx represent
-- a range of elements to be fetched from memory. firstIdx is inclusive,
-- lastIdx is exclusive for normal buffers and inclusive for index buffers,
-- in all cases resulting in lastIdx - firstIdx elements. The ctrl vector
-- is a concatenation of the base address for each buffer and the null
-- bitmap present flags, dependent on CFG.
@cmd_ports
-- Unlock stream (bus clock domain). Produces the chunk tags supplied by
-- the command stream when all BufferReaders finish processing the command.
unlock_valid : out std_logic;
unlock_ready : in std_logic := '1';
unlock_tag : out std_logic_vector(CMD_TAG_WIDTH-1 downto 0);
---------------------------------------------------------------------------
-- Bus access ports
---------------------------------------------------------------------------
-- Bus access port (bus clock domain).
bus_rreq_valid : out std_logic;
bus_rreq_ready : in std_logic;
bus_rreq_addr : out std_logic_vector(BUS_ADDR_WIDTH-1 downto 0);
bus_rreq_len : out std_logic_vector(BUS_LEN_WIDTH-1 downto 0);
bus_rdat_valid : in std_logic;
bus_rdat_ready : out std_logic;
bus_rdat_data : in std_logic_vector(BUS_DATA_WIDTH-1 downto 0);
bus_rdat_last : in std_logic;
---------------------------------------------------------------------------
-- User streams
---------------------------------------------------------------------------
@out_ports
);
end {camelprefix}ColumnReader;
architecture Behavioral of {camelprefix}ColumnReader is
@defs
begin
@arch
-- Wrap an arbiter and register slices around the requested column reader.
{lowerprefix}inst: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => acc_clk,
acc_reset => acc_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
cmd_tag => cmd_tag,
unlock_valid => unlock_valid,
unlock_ready => unlock_ready,
unlock_tag => unlock_tag,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
end Behavioral;
"""
wrapper_component_template = """
component {camelprefix}ColumnReader is
generic (
BUS_ADDR_WIDTH : natural := 32;
BUS_LEN_WIDTH : natural := 8;
BUS_DATA_WIDTH : natural := 32;
BUS_BURST_STEP_LEN : natural := 4;
BUS_BURST_MAX_LEN : natural := 16;
INDEX_WIDTH : natural := 32;
CMD_TAG_ENABLE : boolean := false;
CMD_TAG_WIDTH : natural := 1
);
port (
bus_clk : in std_logic;
bus_reset : in std_logic;
acc_clk : in std_logic;
acc_reset : in std_logic;
@cmd_ports
unlock_valid : out std_logic;
unlock_ready : in std_logic := '1';
unlock_tag : out std_logic_vector(CMD_TAG_WIDTH-1 downto 0);
bus_rreq_valid : out std_logic;
bus_rreq_ready : in std_logic;
bus_rreq_addr : out std_logic_vector(BUS_ADDR_WIDTH-1 downto 0);
bus_rreq_len : out std_logic_vector(BUS_LEN_WIDTH-1 downto 0);
bus_rdat_valid : in std_logic;
bus_rdat_ready : out std_logic;
bus_rdat_data : in std_logic_vector(BUS_DATA_WIDTH-1 downto 0);
bus_rdat_last : in std_logic;
@out_ports
);
end component;
"""
uut_template_with_unlock = """
uut: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => {acc}_clk,
acc_reset => {acc}_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
cmd_tag => cmd_tag,
unlock_valid => unlock_valid,
unlock_ready => unlock_ready,
unlock_tag => unlock_tag,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
"""
uut_template_without_unlock = """
uut: ColumnReaderLevel
generic map (
BUS_ADDR_WIDTH => BUS_ADDR_WIDTH,
BUS_LEN_WIDTH => BUS_LEN_WIDTH,
BUS_DATA_WIDTH => BUS_DATA_WIDTH,
BUS_BURST_STEP_LEN => BUS_BURST_STEP_LEN,
BUS_BURST_MAX_LEN => BUS_BURST_MAX_LEN,
INDEX_WIDTH => INDEX_WIDTH,
CFG => "{cfg}",
CMD_TAG_ENABLE => CMD_TAG_ENABLE,
CMD_TAG_WIDTH => CMD_TAG_WIDTH
)
port map (
bus_clk => bus_clk,
bus_reset => bus_reset,
acc_clk => {acc}_clk,
acc_reset => {acc}_reset,
cmd_valid => cmd_valid,
cmd_ready => cmd_ready,
cmd_firstIdx => cmd_firstIdx,
cmd_lastIdx => cmd_lastIdx,
cmd_ctrl => cmd_ctrl,
bus_rreq_valid(0) => bus_rreq_valid,
bus_rreq_ready(0) => bus_rreq_ready,
bus_rreq_addr => bus_rreq_addr,
bus_rreq_len => bus_rreq_len,
bus_rdat_valid(0) => bus_rdat_valid,
bus_rdat_ready(0) => bus_rdat_ready,
bus_rdat_data => bus_rdat_data,
bus_rdat_last(0) => bus_rdat_last,
out_valid => out_valids,
out_ready => out_readys,
out_last => out_lasts,
out_dvalid => out_dvalids,
out_data => out_datas
);
"""
class ColumnReader(object):
"""Represents a ColumnReader."""
def __init__(self, field, instance_prefix=None, signal_prefix="", bus_clk_prefix="", main_clk_prefix="", **opts):
"""Generates a ColumnReader for the given Arrow field. prefix
optionally specifies a name for the ColumnReader, which will be
prefixed to all signals and instance names in the generated code."""
super().__init__()
# Basic error checking.
if not isinstance(field, Field):
raise TypeError("field must be of type %s" % Field)
self.field = field
# Figure out the prefixes.
if instance_prefix is None:
instance_prefix = field.name
if instance_prefix and not instance_prefix[-1] == "_":
instance_prefix += "_"
self.instance_prefix = instance_prefix
if signal_prefix is None:
signal_prefix = field.name
if signal_prefix and not signal_prefix[-1] == "_":
signal_prefix += "_"
self.signal_prefix = signal_prefix
if bus_clk_prefix and not bus_clk_prefix[-1] == "_":
bus_clk_prefix += "_"
self.bus_clk_prefix = bus_clk_prefix
if main_clk_prefix and not main_clk_prefix[-1] == "_":
main_clk_prefix += "_"
self.main_clk_prefix = main_clk_prefix
# Construct the streams.
self.cmd_stream, cmd_ctrl = _new_cmd_stream(self.signal_prefix)
p = self.signal_prefix + "unlock_"
self.unlock_stream = Stream(p)
self.unlock_stream.append(Signal(p + "tag", CMD_TAG_WIDTH))
p = self.signal_prefix + "bus_rreq_"
self.bus_rreq_stream = Stream(p)
self.bus_rreq_stream.append(Signal(p + "addr", BUS_ADDR_WIDTH))
self.bus_rreq_stream.append(Signal(p + "len", BUS_LEN_WIDTH))
p = self.signal_prefix + "bus_rdat_"
self.bus_rdat_stream = Stream(p)
self.bus_rdat_stream.append(Signal(p + "data", BUS_DATA_WIDTH))
self.bus_rdat_stream.append(Signal(p + "last"))
main_out_stream, out_data = _new_out_stream(self.signal_prefix)
# Construct the field reader.
reader, secondary_out_streams = _field_reader(
self.field,
self.signal_prefix, None,
self.cmd_stream, cmd_ctrl,
main_out_stream, out_data,
**opts)
# If the reader has more than one bus, wrap in an arbiter.
if reader.bus_count() > 1:
reader = ArbReaderLevel(reader)
self.reader = reader
# Construct the output stream group.
self.out_stream = StreamGroup(main_out_stream, *secondary_out_streams)
@property
def _camel_prefix(self):
"""Returns the instance prefix in CamelCase."""
return "".join([w[:1].upper() + w[1:] for w in self.instance_prefix.split("_")])
@property
def _lower_prefix(self):
"""Returns the instance prefix in lower_case."""
return self.instance_prefix.lower()
def cfg(self):
"""Returns the cfg string representation of this ColumnReader."""
return str(self.reader)
def wrapper_body(self):
"""Returns the VHDL entity and body for this ColumnReader's wrapper."""
return gen_template(
wrapper_body_template,
camelprefix = self._camel_prefix,
lowerprefix = self._lower_prefix,
cfg = self.cfg(),
cmd_ports = self.cmd_stream.def_ports(PortDir.IN, False),
out_ports = self.out_stream.def_ports(PortDir.OUT, False).trimsep(),
defs = self.cmd_stream.def_signals(False) + self.out_stream.def_signals(False),
arch = self.cmd_stream.arch_serialize() + self.out_stream.arch_deserialize()
)
def wrapper_component(self):
"""Returns the VHDL entity and body for this ColumnReader's wrapper."""
return gen_template(
wrapper_component_template,
camelprefix = self.instance_prefix[:-1],
cmd_ports = self.cmd_stream.def_ports(PortDir.IN, False),
out_ports = self.out_stream.def_ports(PortDir.OUT, False).trimsep()
)
def testbench(self, **kwargs):
"""Generates a randomized testbench for this ColumnReader."""
# Randomize any parameters not explicitly given.
params = []
def get_param(name, default):
value = kwargs.get(name, default)
params.append((name, value))
return value
seed = get_param("seed", random.randrange(1<<32))
random.seed(seed)
row_count = get_param("row_count", 100)
cmd_count = get_param("cmd_count", 100)
addr_width = get_param("addr_width", random.randint(32, 64))
data_width = get_param("data_width", 1 << random.randint(5, 9))
burst_step_len = get_param("burst_step_len", max(self.field.widest() // data_width, 1 << random.randint(0, 5)))
burst_max_len = get_param("burst_max_len", burst_step_len * (1 << random.randint(0, 4)))
len_width = get_param("len_width", random.randint(1, 4) * int.bit_length(burst_max_len))
tag_width = get_param("tag_width", random.choice([0, 1, 4]))
multi_clk = get_param("multi_clk", True)
random_bus_rreq_timing = get_param("random_bus_rreq_timing", random.choice([True, False]))
random_bus_rdat_timing = get_param("random_bus_rdat_timing", random.choice([True, False]))
# Generate the testbench wrapper object.
acc = "acc" if multi_clk else "bus"
tb = Testbench(self._camel_prefix + "ColumnReader_tb", {"bus", acc})
# Set constants.
tb.set_const("BUS_ADDR_WIDTH", addr_width)
tb.set_const("BUS_LEN_WIDTH", len_width)
tb.set_const("BUS_DATA_WIDTH", data_width)
tb.set_const("BUS_BURST_STEP_LEN", burst_step_len)
tb.set_const("BUS_BURST_MAX_LEN", burst_max_len)
tb.set_const("INDEX_WIDTH", 32)
tb.set_const("CMD_TAG_ENABLE", tag_width > 0)
tb.set_const("CMD_TAG_WIDTH", max(1, tag_width))
# Add the streams.
tb.append_input_stream(self.cmd_stream, "bus")
if tag_width > 0:
tb.append_output_stream(self.unlock_stream, "bus")
tb.append_output_stream(self.bus_rreq_stream, "bus")
tb.append_input_stream(self.bus_rdat_stream, "bus")
tb.append_output_stream(self.out_stream, acc)
# Generate a random set of commands.
commands = []
for _ in range(cmd_count):
a = random.randrange(row_count)
b = random.randrange(row_count)
commands.append((min(a, b), max(a, b) + 1))
# Generate toplevel command stream signal test vectors.
cmd_first_tv = tb.append_test_vector(TestVectors(self.cmd_stream.signals[0]))
cmd_last_tv = tb.append_test_vector(TestVectors(self.cmd_stream.signals[1]))
for start, stop in commands:
cmd_first_tv.append(start)
cmd_last_tv.append(stop)
# Generate tag stream signal test vectors.
if tag_width > 0:
tags = [random.randrange(1 << tag_width) for _ in commands]
tb.append_test_vector(TestVectors(self.cmd_stream.signals[-1])).extend(tags)
tb.append_test_vector(TestVectors(self.unlock_stream.signals[0])).extend(tags)
# Generate output stream master last/dvalid test vectors.
out_last_tv = tb.append_test_vector(TestVectors(self.out_stream.streams[0].signals[0]))
out_dvalid_tv = tb.append_test_vector(TestVectors(self.out_stream.streams[0].signals[1]))
for start, stop in commands:
for i in range(start, stop):
out_last_tv.append(int(i == stop - 1))
out_dvalid_tv.append(1)
# Generate a memory model.
memory = Memory()
tb.append_memory(memory, self.bus_rreq_stream, self.bus_rdat_stream, "bus",
random_bus_rreq_timing, random_bus_rdat_timing)
# Generate the test vectors for the readers.
tvs = self.reader.test_vectors(memory, row_count, commands)
for tv in tvs:
tb.append_test_vector(tv)
# Append unit under test.
template = uut_template_with_unlock if tag_width > 0 else uut_template_without_unlock
tb.append_uut(template.format(cfg=self.cfg(), acc=acc))
# Add documentation.
doc = []
doc.append("Memory dump:")
doc.extend([" " + x for x in memory.hexdump().split("\n")])
doc.append("")
doc.append("Command stream:")
transfer = 1
for i, (start, end) in enumerate(commands):
doc.append(" Command %3d: %4d to %4d = out transfer %5d to %5d" % (
i + 1, start, end - 1, transfer, transfer + (end - start - 1)))
transfer += end - start
doc.append("")
doc.append("Generator parameters:")
doc.extend([" %s: %s" % x for x in params])
doc.append("")
doc.append("Schema:")
doc.extend([" " + x for x in self.field.pprint().split("\n")])
tb.append_uut("\n".join([" -- " + x for x in doc]))
return str(tb) | def _write_buffer(cls, memory, bits, data):
"""Writes an arrow buffer to the given Memory given a list of integers
and bit width."""
memory.align(max(8*64, bits)) |
LegendTaxonomyConfig.ts | /**
* Copyright (c) 2020-present, Goldman Sachs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
type PlainObject,
AssertionError,
guaranteeNonNullable,
assertNonNullable,
guaranteeNonEmptyString,
SerializationFactory,
} from '@finos/legend-shared';
import {
LegendApplicationConfig,
type LegendApplicationConfigurationData,
type LegendApplicationVersionData,
} from '@finos/legend-application';
import { createModelSchema, optional, primitive } from 'serializr';
import { action, computed, makeObservable, observable } from 'mobx';
export class TaxonomyTreeOption {
label!: string;
key!: string;
url!: string;
default?: boolean;
static readonly serialization = new SerializationFactory(
createModelSchema(TaxonomyTreeOption, {
default: optional(primitive()),
label: primitive(),
key: primitive(),
url: primitive(),
}),
);
}
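// A minimal illustrative `taxonomy` entry this shape deserializes from
// (values are made up):
//   { "label": "Demo", "key": "demo", "url": "https://example.com/taxonomy", "default": true }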
export interface LegendTaxonomyConfigurationData
extends LegendApplicationConfigurationData {
appName: string; | depot: {
url: string;
/**
* This is needed since some of our legacy infrastructure does not yet support
* the new API calls, we need to update them to use the latest version of
* finos/legend-depot though
*/
TEMPORARY__useLegacyDepotServerAPIRoutes?: boolean;
};
engine: { url: string; queryUrl?: string };
query: { url: string };
studio: { url: string };
taxonomy: PlainObject<TaxonomyTreeOption>[];
extensions?: Record<PropertyKey, unknown>;
}
export class LegendTaxonomyConfig extends LegendApplicationConfig {
readonly engineServerUrl: string;
readonly engineQueryServerUrl?: string | undefined;
readonly depotServerUrl: string;
readonly queryUrl: string;
readonly studioUrl: string;
readonly TEMPORARY__useLegacyDepotServerAPIRoutes?: boolean | undefined;
currentTaxonomyTreeOption!: TaxonomyTreeOption;
taxonomyTreeOptions: TaxonomyTreeOption[] = [];
constructor(
configData: LegendTaxonomyConfigurationData,
versionData: LegendApplicationVersionData,
baseUrl: string,
) {
super(configData, versionData, baseUrl);
makeObservable(this, {
currentTaxonomyTreeOption: observable,
defaultTaxonomyTreeOption: computed,
setCurrentTaxonomyTreeOption: action,
});
assertNonNullable(
configData.taxonomy,
`Can't configure application: 'taxonomy' field is missing`,
);
if (Array.isArray(configData.taxonomy)) {
const options = configData.taxonomy.map((optionData) =>
TaxonomyTreeOption.serialization.fromJson(optionData),
);
if (options.length === 0) {
throw new AssertionError(
`Can't configure application: 'taxonomy' field has no entry`,
);
}
// Make sure the specified instances are unique by key
if (
new Set(options.map((instance) => instance.key)).size !== options.length
) {
throw new AssertionError(
`Can't configure application: 'taxonomy' field consists of entries with duplicated keys`,
);
}
// Make sure default option is set properly
if (options.filter((instance) => instance.default).length === 0) {
throw new AssertionError(
`Can't configure application: 'taxonomy' field consists of no default entry`,
);
}
if (options.filter((instance) => instance.default).length > 1) {
throw new AssertionError(
`Can't configure application: 'taxonomy' field consists of multiple default entries`,
);
}
this.taxonomyTreeOptions = options;
} else {
throw new AssertionError(
`Can't configure application: 'taxonomy' field is not a list`,
);
}
this.currentTaxonomyTreeOption = this.defaultTaxonomyTreeOption;
assertNonNullable(
configData.engine,
`Can't configure application: 'engine' field is missing`,
);
assertNonNullable(
configData.depot,
`Can't configure application: 'depot' field is missing`,
);
this.engineServerUrl = guaranteeNonEmptyString(
configData.engine.url,
`Can't configure application: 'engine.url' field is missing or empty`,
);
this.engineQueryServerUrl = configData.engine.queryUrl;
this.depotServerUrl = guaranteeNonEmptyString(
configData.depot.url,
`Can't configure application: 'depot.url' field is missing or empty`,
);
this.queryUrl = guaranteeNonEmptyString(
configData.query.url,
`Can't configure application: 'query.url' field is missing or empty`,
);
this.studioUrl = guaranteeNonEmptyString(
configData.studio.url,
`Can't configure application: 'studio.url' field is missing or empty`,
);
this.TEMPORARY__useLegacyDepotServerAPIRoutes =
configData.depot.TEMPORARY__useLegacyDepotServerAPIRoutes;
}
get defaultTaxonomyTreeOption(): TaxonomyTreeOption {
return guaranteeNonNullable(
this.taxonomyTreeOptions.find((option) => option.default),
`Can't find a default taxonomy tree option`,
);
}
setCurrentTaxonomyTreeOption(val: TaxonomyTreeOption): void {
this.currentTaxonomyTreeOption = val;
}
} | env: string; |
mod.rs | pub mod locale;
use actix_web::http::StatusCode;
use handlebars::Handlebars;
use serde::ser::Serialize;
use super::{errors::Error, orm::Connection};
use self::locale::Dao;
pub trait I18n {
fn exist(&self, lang: &str) -> bool;
fn tr<S: Serialize>(&self, lang: &str, code: &str, args: &Option<S>) -> Option<String>;
fn t<C: Into<String>, S: Serialize>(&self, lang: &str, code: C, args: &Option<S>) -> String;
fn e<C: Into<String>, S: Serialize>(&self, lang: &str, code: C, args: &Option<S>) -> Error;
}
impl I18n for Connection {
fn exist(&self, lang: &str) -> bool {
if let Ok(items) = Dao::languages(self) {
return items.contains(&lang.to_string());
}
false
}
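// `tr` renders the stored message for (lang, code) as a Handlebars template;
// e.g. a message "Hello, {{name}}!" with args {"name": "world"} would render
// as "Hello, world!" (values illustrative).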
fn tr<S: Serialize>(&self, lang: &str, code: &str, args: &Option<S>) -> Option<String> {
if let Ok(it) = Dao::by_lang_and_code(self, lang, code) {
let reg = Handlebars::new();
match reg.render_template(&it.message, args) {
Ok(msg) => {
return Some(msg);
}
Err(e) => |
};
}
None
}
fn t<C: Into<String>, S: Serialize>(&self, lang: &str, code: C, args: &Option<S>) -> String {
let code = code.into();
match self.tr(lang, &code, args) {
Some(msg) => msg,
None => format!("{}.{}", lang, code),
}
}
fn e<C: Into<String>, S: Serialize>(&self, lang: &str, code: C, args: &Option<S>) -> Error {
Error::Http(
StatusCode::INTERNAL_SERVER_ERROR,
Some(self.t(lang, code, args)),
)
}
}
| {
error!("{:?}", e);
} |
twitter_hashtag_frequency.py | from collections import Counter
import json
def get_hashtags(tweet):
entities = tweet.get('entities', {})
hashtags = entities.get('hashtags', [])
return [tag['text'].lower() for tag in hashtags]
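# Illustrative example with a minimal tweet structure:
#   get_hashtags({'entities': {'hashtags': [{'text': 'Python'}]}})
#   returns ['python']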
if __name__ == '__main__':
fname = sys.argv[1]
with open(fname, 'r') as f:
hashtags = Counter()
for line in f:
tweet = json.loads(line)
hashtags_in_tweet = get_hashtags(tweet)
hashtags.update(hashtags_in_tweet)
for tag, count in hashtags.most_common(20):
print("{}: {}".format(tag, count)) | # Chap02/twitter_hashtag_frequency.py
import sys |
|
ListTableRowButtons.js | import React from "react";
import {ListTableCell} from "./ListTableCell";
/**
* Container component for TableRow Action Buttons
* @param buttons a list of buttons
* @param value an id of the row element
*
* Calls a SimpleButton component to render each button
*/
export const ListTableRowButtons = ({buttons, value}) => {
return ( | <ListTableCell
contents={
buttons.map((button) => (
<button key={button.name} variant="outlined" value={value} className="btn btn-primary"
onClick={button.function}>{button.name}</button>
))
}
/>
)
} | |
manage.py | import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from scraper import scrape
import sys
from app import app, db
app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def | (arg_min_ilvl):
arg_min_ilvl = float(arg_min_ilvl)
scrape(arg_min_ilvl)
if __name__ == '__main__':
manager.run() | run_scrape |
resource_aws_default_security_group.go | package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsDefaultSecurityGroup() *schema.Resource |
func resourceAwsDefaultSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).ec2conn
securityGroupOpts := &ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
&ec2.Filter{
Name: aws.String("group-name"),
Values: []*string{aws.String("default")},
},
},
}
var vpcId string
if v, ok := d.GetOk("vpc_id"); ok {
vpcId = v.(string)
securityGroupOpts.Filters = append(securityGroupOpts.Filters, &ec2.Filter{
Name: aws.String("vpc-id"),
Values: []*string{aws.String(vpcId)},
})
}
var err error
log.Printf("[DEBUG] Commandeer Default Security Group: %s", securityGroupOpts)
resp, err := conn.DescribeSecurityGroups(securityGroupOpts)
if err != nil {
return fmt.Errorf("Error creating Default Security Group: %s", err)
}
var g *ec2.SecurityGroup
if vpcId != "" {
// if vpcId contains a value, then we expect just a single Security Group
// returned, as default is a protected name for each VPC, and for each
// Region on EC2 Classic
if len(resp.SecurityGroups) != 1 {
return fmt.Errorf("[ERR] Error finding default security group; found (%d) groups: %s", len(resp.SecurityGroups), resp)
}
g = resp.SecurityGroups[0]
} else {
// we need to filter through any returned security groups for the group
// named "default", and does not belong to a VPC
for _, sg := range resp.SecurityGroups {
if sg.VpcId == nil && *sg.GroupName == "default" {
g = sg
}
}
}
if g == nil {
return fmt.Errorf("[ERR] Error finding default security group: no matching group found")
}
d.SetId(*g.GroupId)
log.Printf("[INFO] Default Security Group ID: %s", d.Id())
if err := setTags(conn, d); err != nil {
return err
}
if err := revokeDefaultSecurityGroupRules(meta, g); err != nil {
return errwrap.Wrapf("{{err}}", err)
}
return resourceAwsSecurityGroupUpdate(d, meta)
}
func resourceAwsDefaultSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
log.Printf("[WARN] Cannot destroy Default Security Group. Terraform will remove this resource from the state file, however resources may remain.")
return nil
}
func revokeDefaultSecurityGroupRules(meta interface{}, g *ec2.SecurityGroup) error {
conn := meta.(*AWSClient).ec2conn
log.Printf("[WARN] Removing all ingress and egress rules found on Default Security Group (%s)", *g.GroupId)
if len(g.IpPermissionsEgress) > 0 {
req := &ec2.RevokeSecurityGroupEgressInput{
GroupId: g.GroupId,
IpPermissions: g.IpPermissionsEgress,
}
log.Printf("[DEBUG] Revoking default egress rules for Default Security Group for %s", *g.GroupId)
if _, err := conn.RevokeSecurityGroupEgress(req); err != nil {
return fmt.Errorf(
"Error revoking default egress rules for Default Security Group (%s): %s",
*g.GroupId, err)
}
}
if len(g.IpPermissions) > 0 {
// a limitation in EC2 Classic is that a call to RevokeSecurityGroupIngress
// cannot contain both the GroupName and the GroupId
for _, p := range g.IpPermissions {
for _, uigp := range p.UserIdGroupPairs {
if uigp.GroupId != nil && uigp.GroupName != nil {
uigp.GroupName = nil
}
}
}
req := &ec2.RevokeSecurityGroupIngressInput{
GroupId: g.GroupId,
IpPermissions: g.IpPermissions,
}
log.Printf("[DEBUG] Revoking default ingress rules for Default Security Group for (%s): %s", *g.GroupId, req)
if _, err := conn.RevokeSecurityGroupIngress(req); err != nil {
return fmt.Errorf(
"Error revoking default ingress rules for Default Security Group (%s): %s",
*g.GroupId, err)
}
}
return nil
}
| {
// reuse aws_security_group_rule schema, and methods for READ, UPDATE
dsg := resourceAwsSecurityGroup()
dsg.Create = resourceAwsDefaultSecurityGroupCreate
dsg.Delete = resourceAwsDefaultSecurityGroupDelete
// Descriptions cannot be updated
delete(dsg.Schema, "description")
// name is a computed value for Default Security Groups and cannot be changed
delete(dsg.Schema, "name_prefix")
dsg.Schema["name"] = &schema.Schema{
Type: schema.TypeString,
Computed: true,
}
// We want explicit management of Rules here, so we do not allow them to be
// computed. Instead, an empty config will enforce just that; removal of the
// rules
dsg.Schema["ingress"].Computed = false
dsg.Schema["egress"].Computed = false
return dsg
} |
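The middle block above shows this provider's copy-and-override pattern for "default" resources: reuse the ordinary resource's schema, swap in custom Create and Delete functions, and mark API-controlled fields as computed. A minimal sketch of the same pattern follows; resourceExampleThing and the two override functions are hypothetical names, not part of the file above.

// Sketch only: derive a variant of an existing resource by copying its
// schema and swapping lifecycle functions (hypothetical names throughout).
func resourceExampleDefaultThing() *schema.Resource {
	r := resourceExampleThing() // assumed helper returning *schema.Resource
	r.Create = resourceExampleDefaultThingCreate
	r.Delete = resourceExampleDefaultThingDelete
	// the API names this object, so "name" becomes read-only
	r.Schema["name"] = &schema.Schema{
		Type:     schema.TypeString,
		Computed: true,
	}
	return r
}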
transport_errcatch.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package api
import (
"fmt"
"io"
"net/http"
"net/http/httputil"
"strconv"
"strings"
"github.com/elastic/cloud-sdk-go/pkg/api/mock"
"github.com/elastic/cloud-sdk-go/pkg/models"
"github.com/elastic/cloud-sdk-go/pkg/util/ec"
)
const (
contentType = "Content-Type"
textHTMLContentType = "text/html"
jsonContentType = "application/json"
)
// DefaultTransport can be used by clients which rely on api.UnwrapError to
// obtain the underlying http.Response when it is returned with a StatusCode
// not defined within the swagger spec from which the models have been
// generated. Meaning this is a small hack which allows http.Response.Body to
// be accessed.
// See error.go in the same package for details on how UnwrapError works. Note
// that using this variable directly won't allow any of the http.Transport
// settings to be overridden. To customize the transport further, please use
// NewTransport()
var DefaultTransport = new(ErrCatchTransport)
// NewErrCatchTransport initialises an ErrCatchTransport. See GoDoc for more
// help on this type.
func NewErrCatchTransport(rt http.RoundTripper) *ErrCatchTransport {
return &ErrCatchTransport{rt: rt}
}
// ErrCatchTransport is an http.RoundTripper which allows the http.Response
// to be accessed in certain types of wrapped errors returned by autogenerated
// code.
// See error.go in the same package for details on how UnwrapError works.
type ErrCatchTransport struct {
rt http.RoundTripper
}
// RoundTrip wraps the underlying http.RoundTripper's RoundTrip, falling
// back to a new default transport when none has been configured.
func (e *ErrCatchTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if e.rt == nil {
		// lazily fall back to a default transport; the result must be
		// assigned or the call below would dereference a nil RoundTripper
		e.rt = newDefaultTransport(0)
	}
res, err := e.rt.RoundTrip(req)
if res != nil {
_, _ = httputil.DumpResponse(res, res.Body != nil)
// When the content type is "text/html", a bit of tweaking is required
// so the response body can be consumed as JSON: the standard error
// definition is used, populated with parts of the request so the error
// can be identified.
if strings.Contains(res.Header.Get(contentType), textHTMLContentType) {
res.Header.Set(contentType, jsonContentType)
res.Body = newProxyBody(req, res.StatusCode)
}
}
return res, err
}
func | (req *http.Request, code int) io.ReadCloser {
return mock.NewStructBody(models.BasicFailedReply{
Errors: []*models.BasicFailedReplyElement{
{
Code: ec.String(strconv.Itoa(code)),
Fields: []string{fmt.Sprintf("%s %s", req.Method, req.URL.EscapedPath())},
Message: ec.String(http.StatusText(code)),
},
},
})
}
| newProxyBody |
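A hedged usage sketch, not from the file above: wiring ErrCatchTransport into a plain *http.Client so that text/html error pages are rewritten into the JSON BasicFailedReply shape before they reach code that unwraps API errors. The endpoint URL and timeout are illustrative values; only NewErrCatchTransport and the standard library are relied on.

// Sketch: any http.RoundTripper can be wrapped; here it is
// http.DefaultTransport, and the timeout is arbitrary.
client := &http.Client{
	Transport: NewErrCatchTransport(http.DefaultTransport),
	Timeout:   30 * time.Second,
}
res, err := client.Get("https://example.com/api/v1/deployments")
// On an HTML error page, res.Body now carries a JSON BasicFailedReply.
_, _ = res, err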
models.go | package eventgrid
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"encoding/json"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// The package's fully qualified name.
const fqdn = "github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2019-01-01/eventgrid"
// BasicDeadLetterDestination information about the dead letter destination for an event subscription. To configure a
// deadletter destination, do not directly instantiate an object of this class. Instead, instantiate an object of a
// derived class. Currently, StorageBlobDeadLetterDestination is the only class that derives from this class.
type BasicDeadLetterDestination interface {
AsStorageBlobDeadLetterDestination() (*StorageBlobDeadLetterDestination, bool)
AsDeadLetterDestination() (*DeadLetterDestination, bool)
}
// DeadLetterDestination information about the dead letter destination for an event subscription. To configure
// a deadletter destination, do not directly instantiate an object of this class. Instead, instantiate an
// object of a derived class. Currently, StorageBlobDeadLetterDestination is the only class that derives from
// this class.
type DeadLetterDestination struct {
// EndpointType - Possible values include: 'EndpointTypeDeadLetterDestination', 'EndpointTypeStorageBlob'
EndpointType EndpointTypeBasicDeadLetterDestination `json:"endpointType,omitempty"`
}
func unmarshalBasicDeadLetterDestination(body []byte) (BasicDeadLetterDestination, error) {
var m map[string]interface{}
err := json.Unmarshal(body, &m)
if err != nil {
return nil, err
}
switch m["endpointType"] {
case string(EndpointTypeStorageBlob):
var sbdld StorageBlobDeadLetterDestination
err := json.Unmarshal(body, &sbdld)
return sbdld, err
default:
var dld DeadLetterDestination
err := json.Unmarshal(body, &dld)
return dld, err
}
}
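// exampleDeadLetterRoundTrip is an illustrative sketch, not part of the
// generated file: it shows the endpointType discriminator above selecting
// the concrete type, assuming the enum's wire value is "StorageBlob" as in
// the switch case above.
func exampleDeadLetterRoundTrip() {
	body := []byte(`{"endpointType":"StorageBlob","properties":{"blobContainerName":"dead-letters"}}`)
	dest, err := unmarshalBasicDeadLetterDestination(body)
	if err != nil {
		return
	}
	// The As* helpers defined later in this file perform the safe downcast.
	if sbdld, ok := dest.AsStorageBlobDeadLetterDestination(); ok {
		_ = *sbdld.BlobContainerName // "dead-letters"
	}
}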
func unmarshalBasicDeadLetterDestinationArray(body []byte) ([]BasicDeadLetterDestination, error) {
var rawMessages []*json.RawMessage
err := json.Unmarshal(body, &rawMessages)
if err != nil {
return nil, err
}
dldArray := make([]BasicDeadLetterDestination, len(rawMessages))
for index, rawMessage := range rawMessages {
dld, err := unmarshalBasicDeadLetterDestination(*rawMessage)
if err != nil {
return nil, err
}
dldArray[index] = dld
}
return dldArray, nil
}
// MarshalJSON is the custom marshaler for DeadLetterDestination.
func (dld DeadLetterDestination) MarshalJSON() ([]byte, error) {
dld.EndpointType = EndpointTypeDeadLetterDestination
objectMap := make(map[string]interface{})
if dld.EndpointType != "" {
objectMap["endpointType"] = dld.EndpointType
}
return json.Marshal(objectMap)
}
// AsStorageBlobDeadLetterDestination is the BasicDeadLetterDestination implementation for DeadLetterDestination.
func (dld DeadLetterDestination) AsStorageBlobDeadLetterDestination() (*StorageBlobDeadLetterDestination, bool) {
return nil, false
}
// AsDeadLetterDestination is the BasicDeadLetterDestination implementation for DeadLetterDestination.
func (dld DeadLetterDestination) AsDeadLetterDestination() (*DeadLetterDestination, bool) {
return &dld, true
}
// AsBasicDeadLetterDestination is the BasicDeadLetterDestination implementation for DeadLetterDestination.
func (dld DeadLetterDestination) AsBasicDeadLetterDestination() (BasicDeadLetterDestination, bool) {
return &dld, true
}
// EventHubEventSubscriptionDestination information about the event hub destination for an event
// subscription
type EventHubEventSubscriptionDestination struct {
// EventHubEventSubscriptionDestinationProperties - Event Hub Properties of the event subscription destination
*EventHubEventSubscriptionDestinationProperties `json:"properties,omitempty"`
// EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection'
EndpointType EndpointType `json:"endpointType,omitempty"`
}
// MarshalJSON is the custom marshaler for EventHubEventSubscriptionDestination.
func (ehesd EventHubEventSubscriptionDestination) MarshalJSON() ([]byte, error) {
ehesd.EndpointType = EndpointTypeEventHub
objectMap := make(map[string]interface{})
if ehesd.EventHubEventSubscriptionDestinationProperties != nil {
objectMap["properties"] = ehesd.EventHubEventSubscriptionDestinationProperties
}
if ehesd.EndpointType != "" {
objectMap["endpointType"] = ehesd.EndpointType
}
return json.Marshal(objectMap)
}
// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination.
func (ehesd EventHubEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination.
func (ehesd EventHubEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) {
return &ehesd, true
}
// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination.
func (ehesd EventHubEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) {
return nil, false
}
// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination.
func (ehesd EventHubEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination.
func (ehesd EventHubEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) {
return nil, false
}
// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventHubEventSubscriptionDestination.
func (ehesd EventHubEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) {
return &ehesd, true
}
// UnmarshalJSON is the custom unmarshaler for EventHubEventSubscriptionDestination struct.
func (ehesd *EventHubEventSubscriptionDestination) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var eventHubEventSubscriptionDestinationProperties EventHubEventSubscriptionDestinationProperties
err = json.Unmarshal(*v, &eventHubEventSubscriptionDestinationProperties)
if err != nil {
return err
}
ehesd.EventHubEventSubscriptionDestinationProperties = &eventHubEventSubscriptionDestinationProperties
}
case "endpointType":
if v != nil {
var endpointType EndpointType
err = json.Unmarshal(*v, &endpointType)
if err != nil {
return err
}
ehesd.EndpointType = endpointType
}
}
}
return nil
}
// EventHubEventSubscriptionDestinationProperties the properties for an event hub destination.
type EventHubEventSubscriptionDestinationProperties struct {
// ResourceID - The Azure Resource Id that represents the endpoint of an Event Hub destination of an event subscription.
ResourceID *string `json:"resourceId,omitempty"`
}
// EventSubscription event Subscription
type EventSubscription struct {
autorest.Response `json:"-"`
// EventSubscriptionProperties - Properties of the event subscription
*EventSubscriptionProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Fully qualified identifier of the resource
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Name of the resource
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Type of the resource
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for EventSubscription.
func (es EventSubscription) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if es.EventSubscriptionProperties != nil {
objectMap["properties"] = es.EventSubscriptionProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for EventSubscription struct.
func (es *EventSubscription) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var eventSubscriptionProperties EventSubscriptionProperties
err = json.Unmarshal(*v, &eventSubscriptionProperties)
if err != nil {
return err
}
es.EventSubscriptionProperties = &eventSubscriptionProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
es.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
es.Name = &name
}
case "type": | if err != nil {
return err
}
es.Type = &typeVar
}
}
}
return nil
}
// BasicEventSubscriptionDestination information about the destination for an event subscription
type BasicEventSubscriptionDestination interface {
AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool)
AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool)
AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool)
AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool)
AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool)
}
// EventSubscriptionDestination information about the destination for an event subscription
type EventSubscriptionDestination struct {
// EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection'
EndpointType EndpointType `json:"endpointType,omitempty"`
}
func unmarshalBasicEventSubscriptionDestination(body []byte) (BasicEventSubscriptionDestination, error) {
var m map[string]interface{}
err := json.Unmarshal(body, &m)
if err != nil {
return nil, err
}
switch m["endpointType"] {
case string(EndpointTypeWebHook):
var whesd WebHookEventSubscriptionDestination
err := json.Unmarshal(body, &whesd)
return whesd, err
case string(EndpointTypeEventHub):
var ehesd EventHubEventSubscriptionDestination
err := json.Unmarshal(body, &ehesd)
return ehesd, err
case string(EndpointTypeStorageQueue):
var sqesd StorageQueueEventSubscriptionDestination
err := json.Unmarshal(body, &sqesd)
return sqesd, err
case string(EndpointTypeHybridConnection):
var hcesd HybridConnectionEventSubscriptionDestination
err := json.Unmarshal(body, &hcesd)
return hcesd, err
default:
var esd EventSubscriptionDestination
err := json.Unmarshal(body, &esd)
return esd, err
}
}
func unmarshalBasicEventSubscriptionDestinationArray(body []byte) ([]BasicEventSubscriptionDestination, error) {
var rawMessages []*json.RawMessage
err := json.Unmarshal(body, &rawMessages)
if err != nil {
return nil, err
}
esdArray := make([]BasicEventSubscriptionDestination, len(rawMessages))
for index, rawMessage := range rawMessages {
esd, err := unmarshalBasicEventSubscriptionDestination(*rawMessage)
if err != nil {
return nil, err
}
esdArray[index] = esd
}
return esdArray, nil
}
// MarshalJSON is the custom marshaler for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) MarshalJSON() ([]byte, error) {
esd.EndpointType = EndpointTypeEventSubscriptionDestination
objectMap := make(map[string]interface{})
if esd.EndpointType != "" {
objectMap["endpointType"] = esd.EndpointType
}
return json.Marshal(objectMap)
}
// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) {
return nil, false
}
// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) {
return nil, false
}
// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) {
return &esd, true
}
// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for EventSubscriptionDestination.
func (esd EventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) {
return &esd, true
}
// EventSubscriptionFilter filter for the Event Subscription
type EventSubscriptionFilter struct {
// SubjectBeginsWith - An optional string to filter events for an event subscription based on a resource path prefix.
// The format of this depends on the publisher of the events.
// Wildcard characters are not supported in this path.
SubjectBeginsWith *string `json:"subjectBeginsWith,omitempty"`
// SubjectEndsWith - An optional string to filter events for an event subscription based on a resource path suffix.
// Wildcard characters are not supported in this path.
SubjectEndsWith *string `json:"subjectEndsWith,omitempty"`
// IncludedEventTypes - A list of applicable event types that need to be part of the event subscription.
// If it is desired to subscribe to all event types, the string "all" needs to be specified as an element in this list.
IncludedEventTypes *[]string `json:"includedEventTypes,omitempty"`
// IsSubjectCaseSensitive - Specifies if the SubjectBeginsWith and SubjectEndsWith properties of the filter
// should be compared in a case sensitive manner.
IsSubjectCaseSensitive *bool `json:"isSubjectCaseSensitive,omitempty"`
}
// EventSubscriptionFullURL full endpoint URL of an event subscription
type EventSubscriptionFullURL struct {
autorest.Response `json:"-"`
// EndpointURL - The URL that represents the endpoint of the destination of an event subscription.
EndpointURL *string `json:"endpointUrl,omitempty"`
}
// EventSubscriptionProperties properties of the Event Subscription
type EventSubscriptionProperties struct {
// Topic - READ-ONLY; Name of the topic of the event subscription.
Topic *string `json:"topic,omitempty"`
// ProvisioningState - READ-ONLY; Provisioning state of the event subscription. Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Canceled', 'Failed', 'AwaitingManualAction'
ProvisioningState EventSubscriptionProvisioningState `json:"provisioningState,omitempty"`
// Destination - Information about the destination where events have to be delivered for the event subscription.
Destination BasicEventSubscriptionDestination `json:"destination,omitempty"`
// Filter - Information about the filter for the event subscription.
Filter *EventSubscriptionFilter `json:"filter,omitempty"`
// Labels - List of user defined labels.
Labels *[]string `json:"labels,omitempty"`
// RetryPolicy - The retry policy for events. This can be used to configure maximum number of delivery attempts and time to live for events.
RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"`
// DeadLetterDestination - The DeadLetter destination of the event subscription.
DeadLetterDestination BasicDeadLetterDestination `json:"deadLetterDestination,omitempty"`
}
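// exampleEventSubscriptionProperties is an illustrative sketch, not part of
// the generated file: it composes a destination, filter, and retry policy
// from the models above. The resource ID placeholders and the event type
// string are made-up values. MarshalJSON stamps the endpointType, so it is
// not set explicitly here.
func exampleEventSubscriptionProperties() EventSubscriptionProperties {
	hubID := "/subscriptions/{subId}/resourceGroups/{rg}/providers/Microsoft.EventHub/namespaces/{ns}/eventhubs/{hub}"
	prefix := "/blobServices/default/containers/images/"
	attempts := int32(5)
	return EventSubscriptionProperties{
		Destination: EventHubEventSubscriptionDestination{
			EventHubEventSubscriptionDestinationProperties: &EventHubEventSubscriptionDestinationProperties{
				ResourceID: &hubID,
			},
		},
		Filter: &EventSubscriptionFilter{
			SubjectBeginsWith:  &prefix,
			IncludedEventTypes: &[]string{"Microsoft.Storage.BlobCreated"},
		},
		RetryPolicy: &RetryPolicy{
			MaxDeliveryAttempts: &attempts,
		},
	}
}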
// MarshalJSON is the custom marshaler for EventSubscriptionProperties.
func (esp EventSubscriptionProperties) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
objectMap["destination"] = esp.Destination
if esp.Filter != nil {
objectMap["filter"] = esp.Filter
}
if esp.Labels != nil {
objectMap["labels"] = esp.Labels
}
if esp.RetryPolicy != nil {
objectMap["retryPolicy"] = esp.RetryPolicy
}
objectMap["deadLetterDestination"] = esp.DeadLetterDestination
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for EventSubscriptionProperties struct.
func (esp *EventSubscriptionProperties) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "topic":
if v != nil {
var topic string
err = json.Unmarshal(*v, &topic)
if err != nil {
return err
}
esp.Topic = &topic
}
case "provisioningState":
if v != nil {
var provisioningState EventSubscriptionProvisioningState
err = json.Unmarshal(*v, &provisioningState)
if err != nil {
return err
}
esp.ProvisioningState = provisioningState
}
case "destination":
if v != nil {
destination, err := unmarshalBasicEventSubscriptionDestination(*v)
if err != nil {
return err
}
esp.Destination = destination
}
case "filter":
if v != nil {
var filter EventSubscriptionFilter
err = json.Unmarshal(*v, &filter)
if err != nil {
return err
}
esp.Filter = &filter
}
case "labels":
if v != nil {
var labels []string
err = json.Unmarshal(*v, &labels)
if err != nil {
return err
}
esp.Labels = &labels
}
case "retryPolicy":
if v != nil {
var retryPolicy RetryPolicy
err = json.Unmarshal(*v, &retryPolicy)
if err != nil {
return err
}
esp.RetryPolicy = &retryPolicy
}
case "deadLetterDestination":
if v != nil {
deadLetterDestination, err := unmarshalBasicDeadLetterDestination(*v)
if err != nil {
return err
}
esp.DeadLetterDestination = deadLetterDestination
}
}
}
return nil
}
// EventSubscriptionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type EventSubscriptionsCreateOrUpdateFuture struct {
azure.FutureAPI
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
Result func(EventSubscriptionsClient) (EventSubscription, error)
}
// UnmarshalJSON is the custom unmarshaler for EventSubscriptionsCreateOrUpdateFuture.
func (future *EventSubscriptionsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
var azFuture azure.Future
if err := json.Unmarshal(body, &azFuture); err != nil {
return err
}
future.FutureAPI = &azFuture
future.Result = future.result
return nil
}
// result is the default implementation for EventSubscriptionsCreateOrUpdateFuture.Result.
func (future *EventSubscriptionsCreateOrUpdateFuture) result(client EventSubscriptionsClient) (es EventSubscription, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if es.Response.Response, err = future.GetResult(sender); err == nil && es.Response.Response.StatusCode != http.StatusNoContent {
es, err = client.CreateOrUpdateResponder(es.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsCreateOrUpdateFuture", "Result", es.Response.Response, "Failure responding to request")
}
}
return
}
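// exampleWaitForFuture is an illustrative sketch, not part of the generated
// file: it consumes a future by blocking until the long-running operation
// completes, then fetching the typed result via the Result func wired up in
// UnmarshalJSON above. It assumes the conventional generated
// EventSubscriptionsClient, which embeds autorest.Client, and that
// WaitForCompletionRef is available via the embedded azure.FutureAPI.
func exampleWaitForFuture(ctx context.Context, client EventSubscriptionsClient, future EventSubscriptionsCreateOrUpdateFuture) (EventSubscription, error) {
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return EventSubscription{}, err
	}
	return future.Result(client)
}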
// EventSubscriptionsDeleteFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type EventSubscriptionsDeleteFuture struct {
azure.FutureAPI
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
Result func(EventSubscriptionsClient) (autorest.Response, error)
}
// UnmarshalJSON is the custom unmarshaler for EventSubscriptionsDeleteFuture.
func (future *EventSubscriptionsDeleteFuture) UnmarshalJSON(body []byte) error {
var azFuture azure.Future
if err := json.Unmarshal(body, &azFuture); err != nil {
return err
}
future.FutureAPI = &azFuture
future.Result = future.result
return nil
}
// result is the default implementation for EventSubscriptionsDeleteFuture.Result.
func (future *EventSubscriptionsDeleteFuture) result(client EventSubscriptionsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsDeleteFuture")
return
}
ar.Response = future.Response()
return
}
// EventSubscriptionsListResult result of the List EventSubscriptions operation
type EventSubscriptionsListResult struct {
autorest.Response `json:"-"`
// Value - A collection of EventSubscriptions
Value *[]EventSubscription `json:"value,omitempty"`
}
// EventSubscriptionsUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type EventSubscriptionsUpdateFuture struct {
azure.FutureAPI
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
Result func(EventSubscriptionsClient) (EventSubscription, error)
}
// UnmarshalJSON is the custom unmarshaler for EventSubscriptionsUpdateFuture.
func (future *EventSubscriptionsUpdateFuture) UnmarshalJSON(body []byte) error {
var azFuture azure.Future
if err := json.Unmarshal(body, &azFuture); err != nil {
return err
}
future.FutureAPI = &azFuture
future.Result = future.result
return nil
}
// result is the default implementation for EventSubscriptionsUpdateFuture.Result.
func (future *EventSubscriptionsUpdateFuture) result(client EventSubscriptionsClient) (es EventSubscription, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("eventgrid.EventSubscriptionsUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if es.Response.Response, err = future.GetResult(sender); err == nil && es.Response.Response.StatusCode != http.StatusNoContent {
es, err = client.UpdateResponder(es.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.EventSubscriptionsUpdateFuture", "Result", es.Response.Response, "Failure responding to request")
}
}
return
}
// EventSubscriptionUpdateParameters properties of the Event Subscription update
type EventSubscriptionUpdateParameters struct {
// Destination - Information about the destination where events have to be delivered for the event subscription.
Destination BasicEventSubscriptionDestination `json:"destination,omitempty"`
// Filter - Information about the filter for the event subscription.
Filter *EventSubscriptionFilter `json:"filter,omitempty"`
// Labels - List of user defined labels.
Labels *[]string `json:"labels,omitempty"`
// RetryPolicy - The retry policy for events. This can be used to configure maximum number of delivery attempts and time to live for events.
RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"`
// DeadLetterDestination - The DeadLetter destination of the event subscription.
DeadLetterDestination BasicDeadLetterDestination `json:"deadLetterDestination,omitempty"`
}
// UnmarshalJSON is the custom unmarshaler for EventSubscriptionUpdateParameters struct.
func (esup *EventSubscriptionUpdateParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "destination":
if v != nil {
destination, err := unmarshalBasicEventSubscriptionDestination(*v)
if err != nil {
return err
}
esup.Destination = destination
}
case "filter":
if v != nil {
var filter EventSubscriptionFilter
err = json.Unmarshal(*v, &filter)
if err != nil {
return err
}
esup.Filter = &filter
}
case "labels":
if v != nil {
var labels []string
err = json.Unmarshal(*v, &labels)
if err != nil {
return err
}
esup.Labels = &labels
}
case "retryPolicy":
if v != nil {
var retryPolicy RetryPolicy
err = json.Unmarshal(*v, &retryPolicy)
if err != nil {
return err
}
esup.RetryPolicy = &retryPolicy
}
case "deadLetterDestination":
if v != nil {
deadLetterDestination, err := unmarshalBasicDeadLetterDestination(*v)
if err != nil {
return err
}
esup.DeadLetterDestination = deadLetterDestination
}
}
}
return nil
}
// EventType event Type for a subject under a topic
type EventType struct {
// EventTypeProperties - Properties of the event type.
*EventTypeProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Fully qualified identifier of the resource
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Name of the resource
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Type of the resource
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for EventType.
func (et EventType) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if et.EventTypeProperties != nil {
objectMap["properties"] = et.EventTypeProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for EventType struct.
func (et *EventType) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var eventTypeProperties EventTypeProperties
err = json.Unmarshal(*v, &eventTypeProperties)
if err != nil {
return err
}
et.EventTypeProperties = &eventTypeProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
et.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
et.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
et.Type = &typeVar
}
}
}
return nil
}
// EventTypeProperties properties of the event type
type EventTypeProperties struct {
// DisplayName - Display name of the event type.
DisplayName *string `json:"displayName,omitempty"`
// Description - Description of the event type.
Description *string `json:"description,omitempty"`
// SchemaURL - URL of the schema for this event type.
SchemaURL *string `json:"schemaUrl,omitempty"`
}
// EventTypesListResult result of the List Event Types operation
type EventTypesListResult struct {
autorest.Response `json:"-"`
// Value - A collection of event types
Value *[]EventType `json:"value,omitempty"`
}
// HybridConnectionEventSubscriptionDestination information about the HybridConnection destination for an
// event subscription.
type HybridConnectionEventSubscriptionDestination struct {
// HybridConnectionEventSubscriptionDestinationProperties - Hybrid connection Properties of the event subscription destination
*HybridConnectionEventSubscriptionDestinationProperties `json:"properties,omitempty"`
// EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection'
EndpointType EndpointType `json:"endpointType,omitempty"`
}
// MarshalJSON is the custom marshaler for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) MarshalJSON() ([]byte, error) {
hcesd.EndpointType = EndpointTypeHybridConnection
objectMap := make(map[string]interface{})
if hcesd.HybridConnectionEventSubscriptionDestinationProperties != nil {
objectMap["properties"] = hcesd.HybridConnectionEventSubscriptionDestinationProperties
}
if hcesd.EndpointType != "" {
objectMap["endpointType"] = hcesd.EndpointType
}
return json.Marshal(objectMap)
}
// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) {
return nil, false
}
// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) {
return nil, false
}
// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) {
return &hcesd, true
}
// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) {
return nil, false
}
// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for HybridConnectionEventSubscriptionDestination.
func (hcesd HybridConnectionEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) {
return &hcesd, true
}
// UnmarshalJSON is the custom unmarshaler for HybridConnectionEventSubscriptionDestination struct.
func (hcesd *HybridConnectionEventSubscriptionDestination) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var hybridConnectionEventSubscriptionDestinationProperties HybridConnectionEventSubscriptionDestinationProperties
err = json.Unmarshal(*v, &hybridConnectionEventSubscriptionDestinationProperties)
if err != nil {
return err
}
hcesd.HybridConnectionEventSubscriptionDestinationProperties = &hybridConnectionEventSubscriptionDestinationProperties
}
case "endpointType":
if v != nil {
var endpointType EndpointType
err = json.Unmarshal(*v, &endpointType)
if err != nil {
return err
}
hcesd.EndpointType = endpointType
}
}
}
return nil
}
// HybridConnectionEventSubscriptionDestinationProperties the properties for a hybrid connection
// destination.
type HybridConnectionEventSubscriptionDestinationProperties struct {
// ResourceID - The Azure Resource ID of a hybrid connection that is the destination of an event subscription.
ResourceID *string `json:"resourceId,omitempty"`
}
// Operation represents an operation returned by the GetOperations request
type Operation struct {
// Name - Name of the operation
Name *string `json:"name,omitempty"`
// Display - Display name of the operation
Display *OperationInfo `json:"display,omitempty"`
// Origin - Origin of the operation
Origin *string `json:"origin,omitempty"`
// Properties - Properties of the operation
Properties interface{} `json:"properties,omitempty"`
}
// OperationInfo information about an operation
type OperationInfo struct {
// Provider - Name of the provider
Provider *string `json:"provider,omitempty"`
// Resource - Name of the resource type
Resource *string `json:"resource,omitempty"`
// Operation - Name of the operation
Operation *string `json:"operation,omitempty"`
// Description - Description of the operation
Description *string `json:"description,omitempty"`
}
// OperationsListResult result of the List Operations operation
type OperationsListResult struct {
autorest.Response `json:"-"`
// Value - A collection of operations
Value *[]Operation `json:"value,omitempty"`
}
// Resource definition of a Resource
type Resource struct {
// ID - READ-ONLY; Fully qualified identifier of the resource
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Name of the resource
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Type of the resource
Type *string `json:"type,omitempty"`
}
// RetryPolicy information about the retry policy for an event subscription
type RetryPolicy struct {
// MaxDeliveryAttempts - Maximum number of delivery retry attempts for events.
MaxDeliveryAttempts *int32 `json:"maxDeliveryAttempts,omitempty"`
// EventTimeToLiveInMinutes - Time To Live (in minutes) for events.
EventTimeToLiveInMinutes *int32 `json:"eventTimeToLiveInMinutes,omitempty"`
}
// StorageBlobDeadLetterDestination information about the storage blob based dead letter destination.
type StorageBlobDeadLetterDestination struct {
// StorageBlobDeadLetterDestinationProperties - The properties of the Storage Blob based deadletter destination
*StorageBlobDeadLetterDestinationProperties `json:"properties,omitempty"`
// EndpointType - Possible values include: 'EndpointTypeDeadLetterDestination', 'EndpointTypeStorageBlob'
EndpointType EndpointTypeBasicDeadLetterDestination `json:"endpointType,omitempty"`
}
// MarshalJSON is the custom marshaler for StorageBlobDeadLetterDestination.
func (sbdld StorageBlobDeadLetterDestination) MarshalJSON() ([]byte, error) {
sbdld.EndpointType = EndpointTypeStorageBlob
objectMap := make(map[string]interface{})
if sbdld.StorageBlobDeadLetterDestinationProperties != nil {
objectMap["properties"] = sbdld.StorageBlobDeadLetterDestinationProperties
}
if sbdld.EndpointType != "" {
objectMap["endpointType"] = sbdld.EndpointType
}
return json.Marshal(objectMap)
}
// AsStorageBlobDeadLetterDestination is the BasicDeadLetterDestination implementation for StorageBlobDeadLetterDestination.
func (sbdld StorageBlobDeadLetterDestination) AsStorageBlobDeadLetterDestination() (*StorageBlobDeadLetterDestination, bool) {
return &sbdld, true
}
// AsDeadLetterDestination is the BasicDeadLetterDestination implementation for StorageBlobDeadLetterDestination.
func (sbdld StorageBlobDeadLetterDestination) AsDeadLetterDestination() (*DeadLetterDestination, bool) {
return nil, false
}
// AsBasicDeadLetterDestination is the BasicDeadLetterDestination implementation for StorageBlobDeadLetterDestination.
func (sbdld StorageBlobDeadLetterDestination) AsBasicDeadLetterDestination() (BasicDeadLetterDestination, bool) {
return &sbdld, true
}
// UnmarshalJSON is the custom unmarshaler for StorageBlobDeadLetterDestination struct.
func (sbdld *StorageBlobDeadLetterDestination) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var storageBlobDeadLetterDestinationProperties StorageBlobDeadLetterDestinationProperties
err = json.Unmarshal(*v, &storageBlobDeadLetterDestinationProperties)
if err != nil {
return err
}
sbdld.StorageBlobDeadLetterDestinationProperties = &storageBlobDeadLetterDestinationProperties
}
case "endpointType":
if v != nil {
var endpointType EndpointTypeBasicDeadLetterDestination
err = json.Unmarshal(*v, &endpointType)
if err != nil {
return err
}
sbdld.EndpointType = endpointType
}
}
}
return nil
}
// StorageBlobDeadLetterDestinationProperties properties of the storage blob based dead letter destination.
type StorageBlobDeadLetterDestinationProperties struct {
// ResourceID - The Azure Resource ID of the storage account that is the destination of the deadletter events. For example: /subscriptions/{AzureSubscriptionId}/resourceGroups/{ResourceGroupName}/providers/microsoft.Storage/storageAccounts/{StorageAccountName}
ResourceID *string `json:"resourceId,omitempty"`
// BlobContainerName - The name of the Storage blob container that is the destination of the deadletter events
BlobContainerName *string `json:"blobContainerName,omitempty"`
}
// StorageQueueEventSubscriptionDestination information about the storage queue destination for an event
// subscription.
type StorageQueueEventSubscriptionDestination struct {
// StorageQueueEventSubscriptionDestinationProperties - Storage Queue Properties of the event subscription destination
*StorageQueueEventSubscriptionDestinationProperties `json:"properties,omitempty"`
// EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection'
EndpointType EndpointType `json:"endpointType,omitempty"`
}
// MarshalJSON is the custom marshaler for StorageQueueEventSubscriptionDestination.
func (sqesd StorageQueueEventSubscriptionDestination) MarshalJSON() ([]byte, error) {
sqesd.EndpointType = EndpointTypeStorageQueue
objectMap := make(map[string]interface{})
if sqesd.StorageQueueEventSubscriptionDestinationProperties != nil {
objectMap["properties"] = sqesd.StorageQueueEventSubscriptionDestinationProperties
}
if sqesd.EndpointType != "" {
objectMap["endpointType"] = sqesd.EndpointType
}
return json.Marshal(objectMap)
}
// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination.
func (sqesd StorageQueueEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination.
func (sqesd StorageQueueEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) {
return nil, false
}
// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination.
func (sqesd StorageQueueEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) {
return &sqesd, true
}
// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination.
func (sqesd StorageQueueEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination.
func (sqesd StorageQueueEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) {
return nil, false
}
// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for StorageQueueEventSubscriptionDestination.
func (sqesd StorageQueueEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) {
return &sqesd, true
}
// UnmarshalJSON is the custom unmarshaler for StorageQueueEventSubscriptionDestination struct.
func (sqesd *StorageQueueEventSubscriptionDestination) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var storageQueueEventSubscriptionDestinationProperties StorageQueueEventSubscriptionDestinationProperties
err = json.Unmarshal(*v, &storageQueueEventSubscriptionDestinationProperties)
if err != nil {
return err
}
sqesd.StorageQueueEventSubscriptionDestinationProperties = &storageQueueEventSubscriptionDestinationProperties
}
case "endpointType":
if v != nil {
var endpointType EndpointType
err = json.Unmarshal(*v, &endpointType)
if err != nil {
return err
}
sqesd.EndpointType = endpointType
}
}
}
return nil
}
// StorageQueueEventSubscriptionDestinationProperties the properties for a storage queue destination.
type StorageQueueEventSubscriptionDestinationProperties struct {
// ResourceID - The Azure Resource ID of the storage account that contains the queue that is the destination of an event subscription.
ResourceID *string `json:"resourceId,omitempty"`
// QueueName - The name of the Storage queue under a storage account that is the destination of an event subscription.
QueueName *string `json:"queueName,omitempty"`
}
// Topic EventGrid Topic
type Topic struct {
autorest.Response `json:"-"`
// TopicProperties - Properties of the topic
*TopicProperties `json:"properties,omitempty"`
// Location - Location of the resource
Location *string `json:"location,omitempty"`
// Tags - Tags of the resource
Tags map[string]*string `json:"tags"`
// ID - READ-ONLY; Fully qualified identifier of the resource
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Name of the resource
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Type of the resource
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for Topic.
func (t Topic) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if t.TopicProperties != nil {
objectMap["properties"] = t.TopicProperties
}
if t.Location != nil {
objectMap["location"] = t.Location
}
if t.Tags != nil {
objectMap["tags"] = t.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Topic struct.
func (t *Topic) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var topicProperties TopicProperties
err = json.Unmarshal(*v, &topicProperties)
if err != nil {
return err
}
t.TopicProperties = &topicProperties
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
t.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
t.Tags = tags
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
t.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
t.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
t.Type = &typeVar
}
}
}
return nil
}
// TopicProperties properties of the Topic
type TopicProperties struct {
// ProvisioningState - READ-ONLY; Provisioning state of the topic. Possible values include: 'TopicProvisioningStateCreating', 'TopicProvisioningStateUpdating', 'TopicProvisioningStateDeleting', 'TopicProvisioningStateSucceeded', 'TopicProvisioningStateCanceled', 'TopicProvisioningStateFailed'
ProvisioningState TopicProvisioningState `json:"provisioningState,omitempty"`
// Endpoint - READ-ONLY; Endpoint for the topic.
Endpoint *string `json:"endpoint,omitempty"`
}
// TopicRegenerateKeyRequest topic regenerate shared access key request
type TopicRegenerateKeyRequest struct {
// KeyName - Key name to regenerate key1 or key2
KeyName *string `json:"keyName,omitempty"`
}
// TopicsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type TopicsCreateOrUpdateFuture struct {
azure.FutureAPI
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
Result func(TopicsClient) (Topic, error)
}
// UnmarshalJSON is the custom unmarshaler for TopicsCreateOrUpdateFuture.
func (future *TopicsCreateOrUpdateFuture) UnmarshalJSON(body []byte) error {
var azFuture azure.Future
if err := json.Unmarshal(body, &azFuture); err != nil {
return err
}
future.FutureAPI = &azFuture
future.Result = future.result
return nil
}
// result is the default implementation for TopicsCreateOrUpdateFuture.Result.
func (future *TopicsCreateOrUpdateFuture) result(client TopicsClient) (t Topic, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.TopicsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent {
t, err = client.CreateOrUpdateResponder(t.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.TopicsCreateOrUpdateFuture", "Result", t.Response.Response, "Failure responding to request")
}
}
return
}
// TopicsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type TopicsDeleteFuture struct {
azure.FutureAPI
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
Result func(TopicsClient) (autorest.Response, error)
}
// UnmarshalJSON is the custom unmarshaler for TopicsDeleteFuture.
func (future *TopicsDeleteFuture) UnmarshalJSON(body []byte) error {
var azFuture azure.Future
if err := json.Unmarshal(body, &azFuture); err != nil {
return err
}
future.FutureAPI = &azFuture
future.Result = future.result
return nil
}
// result is the default implementation for TopicsDeleteFuture.Result.
func (future *TopicsDeleteFuture) result(client TopicsClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.TopicsDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsDeleteFuture")
return
}
ar.Response = future.Response()
return
}
// TopicSharedAccessKeys shared access keys of the Topic
type TopicSharedAccessKeys struct {
autorest.Response `json:"-"`
// Key1 - Shared access key1 for the topic.
Key1 *string `json:"key1,omitempty"`
// Key2 - Shared access key2 for the topic.
Key2 *string `json:"key2,omitempty"`
}
// TopicsListResult result of the List Topics operation
type TopicsListResult struct {
autorest.Response `json:"-"`
// Value - A collection of Topics
Value *[]Topic `json:"value,omitempty"`
}
// TopicsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type TopicsUpdateFuture struct {
azure.FutureAPI
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
Result func(TopicsClient) (Topic, error)
}
// UnmarshalJSON is the custom unmarshaler for TopicsUpdateFuture.
func (future *TopicsUpdateFuture) UnmarshalJSON(body []byte) error {
var azFuture azure.Future
if err := json.Unmarshal(body, &azFuture); err != nil {
return err
}
future.FutureAPI = &azFuture
future.Result = future.result
return nil
}
// result is the default implementation for TopicsUpdateFuture.Result.
func (future *TopicsUpdateFuture) result(client TopicsClient) (t Topic, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.TopicsUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("eventgrid.TopicsUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if t.Response.Response, err = future.GetResult(sender); err == nil && t.Response.Response.StatusCode != http.StatusNoContent {
t, err = client.UpdateResponder(t.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "eventgrid.TopicsUpdateFuture", "Result", t.Response.Response, "Failure responding to request")
}
}
return
}
// TopicTypeInfo properties of a topic type info.
type TopicTypeInfo struct {
autorest.Response `json:"-"`
// TopicTypeProperties - Properties of the topic type info
*TopicTypeProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Fully qualified identifier of the resource
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Name of the resource
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Type of the resource
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for TopicTypeInfo.
func (tti TopicTypeInfo) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if tti.TopicTypeProperties != nil {
objectMap["properties"] = tti.TopicTypeProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for TopicTypeInfo struct.
func (tti *TopicTypeInfo) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var topicTypeProperties TopicTypeProperties
err = json.Unmarshal(*v, &topicTypeProperties)
if err != nil {
return err
}
tti.TopicTypeProperties = &topicTypeProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
tti.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
tti.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
tti.Type = &typeVar
}
}
}
return nil
}
// TopicTypeProperties properties of a topic type.
type TopicTypeProperties struct {
// Provider - Namespace of the provider of the topic type.
Provider *string `json:"provider,omitempty"`
// DisplayName - Display Name for the topic type.
DisplayName *string `json:"displayName,omitempty"`
// Description - Description of the topic type.
Description *string `json:"description,omitempty"`
// ResourceRegionType - Region type of the resource. Possible values include: 'RegionalResource', 'GlobalResource'
ResourceRegionType ResourceRegionType `json:"resourceRegionType,omitempty"`
// ProvisioningState - Provisioning state of the topic type. Possible values include: 'TopicTypeProvisioningStateCreating', 'TopicTypeProvisioningStateUpdating', 'TopicTypeProvisioningStateDeleting', 'TopicTypeProvisioningStateSucceeded', 'TopicTypeProvisioningStateCanceled', 'TopicTypeProvisioningStateFailed'
ProvisioningState TopicTypeProvisioningState `json:"provisioningState,omitempty"`
// SupportedLocations - List of locations supported by this topic type.
SupportedLocations *[]string `json:"supportedLocations,omitempty"`
}
// TopicTypesListResult result of the List Topic Types operation
type TopicTypesListResult struct {
autorest.Response `json:"-"`
// Value - A collection of topic types
Value *[]TopicTypeInfo `json:"value,omitempty"`
}
// TopicUpdateParameters properties of the Topic update
type TopicUpdateParameters struct {
// Tags - Tags of the resource
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for TopicUpdateParameters.
func (tup TopicUpdateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if tup.Tags != nil {
objectMap["tags"] = tup.Tags
}
return json.Marshal(objectMap)
}
// TrackedResource definition of a Tracked Resource
type TrackedResource struct {
// Location - Location of the resource
Location *string `json:"location,omitempty"`
// Tags - Tags of the resource
Tags map[string]*string `json:"tags"`
// ID - READ-ONLY; Fully qualified identifier of the resource
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Name of the resource
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Type of the resource
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for TrackedResource.
func (tr TrackedResource) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if tr.Location != nil {
objectMap["location"] = tr.Location
}
if tr.Tags != nil {
objectMap["tags"] = tr.Tags
}
return json.Marshal(objectMap)
}
// WebHookEventSubscriptionDestination information about the webhook destination for an event subscription
type WebHookEventSubscriptionDestination struct {
// WebHookEventSubscriptionDestinationProperties - WebHook Properties of the event subscription destination
*WebHookEventSubscriptionDestinationProperties `json:"properties,omitempty"`
// EndpointType - Possible values include: 'EndpointTypeEventSubscriptionDestination', 'EndpointTypeWebHook', 'EndpointTypeEventHub', 'EndpointTypeStorageQueue', 'EndpointTypeHybridConnection'
EndpointType EndpointType `json:"endpointType,omitempty"`
}
// MarshalJSON is the custom marshaler for WebHookEventSubscriptionDestination.
func (whesd WebHookEventSubscriptionDestination) MarshalJSON() ([]byte, error) {
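	// Set the discriminator before building the map so the marshaled
	// payload always identifies itself as a WebHook destination.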
whesd.EndpointType = EndpointTypeWebHook
objectMap := make(map[string]interface{})
if whesd.WebHookEventSubscriptionDestinationProperties != nil {
objectMap["properties"] = whesd.WebHookEventSubscriptionDestinationProperties
}
if whesd.EndpointType != "" {
objectMap["endpointType"] = whesd.EndpointType
}
return json.Marshal(objectMap)
}
// AsWebHookEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination.
func (whesd WebHookEventSubscriptionDestination) AsWebHookEventSubscriptionDestination() (*WebHookEventSubscriptionDestination, bool) {
return &whesd, true
}
// AsEventHubEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination.
func (whesd WebHookEventSubscriptionDestination) AsEventHubEventSubscriptionDestination() (*EventHubEventSubscriptionDestination, bool) {
return nil, false
}
// AsStorageQueueEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination.
func (whesd WebHookEventSubscriptionDestination) AsStorageQueueEventSubscriptionDestination() (*StorageQueueEventSubscriptionDestination, bool) {
return nil, false
}
// AsHybridConnectionEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination.
func (whesd WebHookEventSubscriptionDestination) AsHybridConnectionEventSubscriptionDestination() (*HybridConnectionEventSubscriptionDestination, bool) {
return nil, false
}
// AsEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination.
func (whesd WebHookEventSubscriptionDestination) AsEventSubscriptionDestination() (*EventSubscriptionDestination, bool) {
return nil, false
}
// AsBasicEventSubscriptionDestination is the BasicEventSubscriptionDestination implementation for WebHookEventSubscriptionDestination.
func (whesd WebHookEventSubscriptionDestination) AsBasicEventSubscriptionDestination() (BasicEventSubscriptionDestination, bool) {
return &whesd, true
}
// UnmarshalJSON is the custom unmarshaler for WebHookEventSubscriptionDestination struct.
func (whesd *WebHookEventSubscriptionDestination) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var webHookEventSubscriptionDestinationProperties WebHookEventSubscriptionDestinationProperties
err = json.Unmarshal(*v, &webHookEventSubscriptionDestinationProperties)
if err != nil {
return err
}
whesd.WebHookEventSubscriptionDestinationProperties = &webHookEventSubscriptionDestinationProperties
}
case "endpointType":
if v != nil {
var endpointType EndpointType
err = json.Unmarshal(*v, &endpointType)
if err != nil {
return err
}
whesd.EndpointType = endpointType
}
}
}
return nil
}
// WebHookEventSubscriptionDestinationProperties information about the webhook destination properties for
// an event subscription.
type WebHookEventSubscriptionDestinationProperties struct {
// EndpointURL - The URL that represents the endpoint of the destination of an event subscription.
EndpointURL *string `json:"endpointUrl,omitempty"`
// EndpointBaseURL - READ-ONLY; The base URL that represents the endpoint of the destination of an event subscription.
EndpointBaseURL *string `json:"endpointBaseUrl,omitempty"`
}
// MarshalJSON is the custom marshaler for WebHookEventSubscriptionDestinationProperties.
func (whesdp WebHookEventSubscriptionDestinationProperties) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if whesdp.EndpointURL != nil {
objectMap["endpointUrl"] = whesdp.EndpointURL
}
return json.Marshal(objectMap)
}
grouping.py
'''
Since scaffolds are not directly stored and instead are associated with each
contig, we must extract the total length of scaffolds in each assembly, as
well as the length of any intersections.
Then, we get the maximum weighted Jaccard index for each reference scaffold,
which is defined as the length of the intersection divided by the length of
the union. To weight this in the averaging step, we then multiply this by
the length of the reference scaffold.
The sum of these maximum weighted indices is then divided by the total
length of the reference scaffolds.
'''
def count(first, second):
first_contigs, first_positions = first
second_contigs, second_positions = second
intersection_scaffolds = {}
first_scaffolds = {}
second_scaffolds = {}
for contig in first_contigs:
# Get contig length and scaffold information.
contig_length = first_contigs[contig]['length']
first_scaffold_name = first_contigs[contig]['scaffold']
second_scaffold_name = second_contigs[contig]['scaffold']
# Iterate the count and length on the scaffold of the first assembly.
if first_scaffold_name not in first_scaffolds:
first_scaffolds[first_scaffold_name] = contig_length
else:
first_scaffolds[first_scaffold_name] += contig_length
# Iterate the count and length on the scaffold of the second assembly.
if second_scaffold_name not in second_scaffolds:
second_scaffolds[second_scaffold_name] = contig_length
else:
second_scaffolds[second_scaffold_name] += contig_length
# Iterate the count of the intersection.
intersection = (first_scaffold_name, second_scaffold_name)
if intersection not in intersection_scaffolds:
intersection_scaffolds[intersection] = contig_length
else:
intersection_scaffolds[intersection] += contig_length
weighted_jaccard = []
for i in first_scaffolds:
maximum = 0
for j in second_scaffolds:
# Get a value for the intersection.
if (i,j) not in intersection_scaffolds:
continue
# Get a value for the union.
weighted_union = \
first_scaffolds[i] + \
second_scaffolds[j] - \
intersection_scaffolds[(i,j)]
# Append the Jaccard index.
weighted_index = \
(intersection_scaffolds[(i,j)]/ \
weighted_union) * \
first_scaffolds[i]
if weighted_index > maximum:
maximum = weighted_index
weighted_jaccard.append(maximum)
    return sum(weighted_jaccard) / sum(first_scaffolds.values())
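# Illustrative check (an addition, not part of the original script): two toy
# assemblies sharing three contigs; contig names, lengths, and scaffold names
# are invented. The first assembly's scaffolds serve as the reference.
if __name__ == '__main__':
    first = ({'c1': {'length': 100, 'scaffold': 'ref1'},
              'c2': {'length': 50, 'scaffold': 'ref1'},
              'c3': {'length': 200, 'scaffold': 'ref2'}}, None)
    second = ({'c1': {'length': 100, 'scaffold': 'alt1'},
               'c2': {'length': 50, 'scaffold': 'alt2'},
               'c3': {'length': 200, 'scaffold': 'alt2'}}, None)
    # ref1's best match is alt1 (weighted index 100.0); ref2's is alt2
    # (0.8 * 200 = 160.0), so count returns 260 / 350 = 0.742857...
    print(count(first, second))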
debugger.rs
#[doc = "Reader of register DEBUGGER"]
pub type R = crate::R<u32, super::DEBUGGER>;
#[doc = "Writer for register DEBUGGER"]
pub type W = crate::W<u32, super::DEBUGGER>;
#[doc = "Register DEBUGGER `reset()`'s with value 0"]
impl crate::ResetValue for super::DEBUGGER {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `LOCKOUT`"]
pub type LOCKOUT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `LOCKOUT`"]
pub struct LOCKOUT_W<'a> {
w: &'a mut W,
}
impl<'a> LOCKOUT_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - Lockout of debugger (SWD)."]
#[inline(always)]
    pub fn lockout(&self) -> LOCKOUT_R {
        LOCKOUT_R::new((self.bits & 0x01) != 0)
    }
}
impl W {
#[doc = "Bit 0 - Lockout of debugger (SWD)."]
#[inline(always)]
pub fn lockout(&mut self) -> LOCKOUT_W {
LOCKOUT_W { w: self }
}
}
stack_upgrade.py
import os
import shutil
from cement.core.controller import CementBaseController, expose
from wo.cli.plugins.stack_pref import post_pref, pre_pref, pre_stack
from wo.core.aptget import WOAptGet
from wo.core.download import WODownload
from wo.core.extract import WOExtract
from wo.core.fileutils import WOFileUtils
from wo.core.logging import Log
from wo.core.shellexec import WOShellExec
from wo.core.variables import WOVar
from wo.core.services import WOService
class WOStackUpgradeController(CementBaseController):
class Meta:
label = 'upgrade'
stacked_on = 'stack'
stacked_type = 'nested'
description = ('Upgrade stack safely')
arguments = [
(['--all'],
dict(help='Upgrade all stack', action='store_true')),
(['--web'],
dict(help='Upgrade web stack', action='store_true')),
(['--admin'],
dict(help='Upgrade admin tools stack', action='store_true')),
(['--security'],
dict(help='Upgrade security stack', action='store_true')),
(['--nginx'],
dict(help='Upgrade Nginx stack', action='store_true')),
(['--php'],
dict(help='Upgrade PHP 7.2 stack', action='store_true')),
(['--php72'],
dict(help='Upgrade PHP 7.2 stack', action='store_true')),
(['--php73'],
dict(help='Upgrade PHP 7.3 stack', action='store_true')),
(['--php74'],
dict(help='Upgrade PHP 7.4 stack', action='store_true')),
(['--mysql'],
dict(help='Upgrade MySQL stack', action='store_true')),
(['--wpcli'],
dict(help='Upgrade WPCLI', action='store_true')),
(['--redis'],
dict(help='Upgrade Redis', action='store_true')),
(['--netdata'],
dict(help='Upgrade Netdata', action='store_true')),
(['--fail2ban'],
dict(help='Upgrade Fail2Ban', action='store_true')),
(['--dashboard'],
dict(help='Upgrade WordOps Dashboard', action='store_true')),
(['--composer'],
dict(help='Upgrade Composer', action='store_true')),
(['--mysqltuner'],
            dict(help='Upgrade MySQLTuner', action='store_true')),
(['--phpmyadmin'],
dict(help='Upgrade phpMyAdmin', action='store_true')),
(['--adminer'],
dict(help='Upgrade Adminer', action='store_true')),
(['--ngxblocker'],
            dict(help='Upgrade ngxblocker', action='store_true')),
(['--no-prompt'],
dict(help="Upgrade Packages without any prompt",
action='store_true')),
(['--force'],
dict(help="Force Packages upgrade without any prompt",
action='store_true')),
]
@expose(hide=True)
def default(self, disp_msg=False):
# All package update
apt_packages = []
packages = []
self.msg = []
pargs = self.app.pargs
wo_phpmyadmin = WODownload.pma_release(self)
if not (pargs.web or pargs.nginx or pargs.php or
pargs.php72 or pargs.php73 or pargs.php74 or pargs.mysql or
pargs.ngxblocker or pargs.all or pargs.netdata or
pargs.wpcli or pargs.composer or pargs.phpmyadmin or
pargs.adminer or pargs.dashboard or pargs.mysqltuner or
pargs.redis or pargs.fail2ban or pargs.security):
pargs.web = True
pargs.admin = True
pargs.security = True
if pargs.php:
pargs.php72 = True
if pargs.all:
pargs.web = True
pargs.admin = True
pargs.security = True
pargs.redis = True
if pargs.web:
pargs.nginx = True
pargs.php72 = True
pargs.php73 = True
pargs.php74 = True
pargs.mysql = True
pargs.wpcli = True
if pargs.admin:
pargs.netdata = True
pargs.composer = True
pargs.dashboard = True
pargs.phpmyadmin = True
pargs.wpcli = True
pargs.adminer = True
pargs.mysqltuner = True
if pargs.security:
pargs.ngxblocker = True
pargs.fail2ban = True
# nginx
if pargs.nginx:
if WOAptGet.is_installed(self, 'nginx-custom'):
apt_packages = apt_packages + WOVar.wo_nginx
else:
                if os.path.isfile('/usr/sbin/nginx'):
                    Log.info(self, "Updating Nginx templates")
                    post_pref(self, WOVar.wo_nginx, [])
else:
Log.info(self, "Nginx Stable is not already installed")
# php 7.2
if pargs.php72:
if WOAptGet.is_installed(self, 'php7.2-fpm'):
apt_packages = apt_packages + WOVar.wo_php72 + \
WOVar.wo_php_extra
# php 7.3
if pargs.php73:
if WOAptGet.is_installed(self, 'php7.3-fpm'):
apt_packages = apt_packages + WOVar.wo_php73 + \
WOVar.wo_php_extra
# php 7.4
if pargs.php74:
if WOAptGet.is_installed(self, 'php7.4-fpm'):
apt_packages = apt_packages + WOVar.wo_php74 + \
WOVar.wo_php_extra
# mysql
if pargs.mysql:
if WOShellExec.cmd_exec(self, 'mysqladmin ping'):
apt_packages = apt_packages + ['mariadb-server']
# redis
if pargs.redis:
if WOAptGet.is_installed(self, 'redis-server'):
apt_packages = apt_packages + ['redis-server']
# fail2ban
if pargs.fail2ban:
if WOAptGet.is_installed(self, 'fail2ban'):
apt_packages = apt_packages + ['fail2ban']
# wp-cli
if pargs.wpcli:
if os.path.isfile('/usr/local/bin/wp'):
packages = packages + [[
"https://github.com/wp-cli/wp-cli/"
"releases/download/v{0}/"
"wp-cli-{0}.phar".format(WOVar.wo_wp_cli),
"/usr/local/bin/wp",
"WP-CLI"]]
else:
Log.info(self, "WPCLI is not installed with WordOps")
# netdata
if pargs.netdata:
# detect static binaries install
if os.path.isdir('/opt/netdata'):
packages = packages + [[
'https://my-netdata.io/kickstart-static64.sh',
'/var/lib/wo/tmp/kickstart.sh', 'Netdata']]
# detect install from source
elif os.path.isdir('/etc/netdata'):
packages = packages + [[
'https://my-netdata.io/kickstart.sh',
'/var/lib/wo/tmp/kickstart.sh', 'Netdata']]
else:
Log.info(self, 'Netdata is not installed')
# wordops dashboard
if pargs.dashboard:
if (os.path.isfile('/var/www/22222/htdocs/index.php') or
os.path.isfile('/var/www/22222/htdocs/index.html')):
packages = packages + [[
"https://github.com/WordOps/wordops-dashboard/"
"releases/download/v{0}/wordops-dashboard.tar.gz"
.format(WOVar.wo_dashboard),
"/var/lib/wo/tmp/wo-dashboard.tar.gz",
"WordOps Dashboard"]]
else:
Log.info(self, 'WordOps dashboard is not installed')
# phpmyadmin
if pargs.phpmyadmin:
if os.path.isdir('/var/www/22222/htdocs/db/pma'):
packages = packages + [[
"https://files.phpmyadmin.net"
"/phpMyAdmin/{0}/phpMyAdmin-{0}-"
"all-languages.tar.gz"
.format(wo_phpmyadmin),
"/var/lib/wo/tmp/pma.tar.gz",
"PHPMyAdmin"]]
else:
Log.info(self, "phpMyAdmin isn't installed")
# adminer
if pargs.adminer:
if os.path.isfile("{0}22222/htdocs/db/"
"adminer/index.php"
.format(WOVar.wo_webroot)):
Log.debug(self, "Setting packages variable for Adminer ")
packages = packages + [[
"https://www.adminer.org/latest.php",
"{0}22222/"
"htdocs/db/adminer/index.php"
.format(WOVar.wo_webroot),
"Adminer"],
["https://raw.githubusercontent.com"
"/vrana/adminer/master/designs/"
"pepa-linha/adminer.css",
"{0}22222/"
"htdocs/db/adminer/adminer.css"
.format(WOVar.wo_webroot),
"Adminer theme"]]
else:
Log.debug(self, "Adminer isn't installed")
Log.info(self, "Adminer isn't installed")
# composer
if pargs.composer:
if os.path.isfile('/usr/local/bin/composer'):
packages = packages + [[
"https://getcomposer.org/installer",
"/var/lib/wo/tmp/composer-install",
"Composer"]]
else:
Log.info(self, "Composer isn't installed")
# mysqltuner
if pargs.mysqltuner:
if WOAptGet.is_exec(self, 'mysqltuner'):
Log.debug(self, "Setting packages variable "
"for MySQLTuner ")
packages = packages + [["https://raw."
"githubusercontent.com/"
"major/MySQLTuner-perl"
"/master/mysqltuner.pl",
"/usr/bin/mysqltuner",
"MySQLTuner"]]
# ngxblocker
if pargs.ngxblocker:
if os.path.exists('/usr/local/sbin/install-ngxblocker'):
packages = packages + [[
'https://raw.githubusercontent.com/mitchellkrogza/'
'nginx-ultimate-bad-bot-blocker/master/update-ngxblocker',
'/usr/local/sbin/update-ngxblocker',
'ngxblocker'
]]
        if not apt_packages and not packages:
self.app.args.print_help()
else:
pre_stack(self)
if (apt_packages):
if not ("php7.2-fpm" in apt_packages or
"php7.3-fpm" in apt_packages or
"php7.4-fpm" in apt_packages or
"redis-server" in apt_packages or
"nginx-custom" in apt_packages or
"mariadb-server" in apt_packages):
pass
else:
Log.warn(
self, "Your sites may be down for few seconds if "
"you are upgrading Nginx, PHP-FPM, MariaDB or Redis")
# Check prompt
if not (pargs.no_prompt or pargs.force):
                    start_upgrade = input("Do you want to continue? [y/N] ")
if start_upgrade != "Y" and start_upgrade != "y":
Log.error(self, "Not starting package update")
Log.wait(self, "Updating APT cache")
# apt-get update
WOAptGet.update(self)
Log.valide(self, "Updating APT cache")
# additional pre_pref
if "nginx-custom" in apt_packages:
pre_pref(self, WOVar.wo_nginx)
if "php7.2-fpm" in apt_packages:
WOAptGet.remove(self, ['php7.2-fpm'],
auto=False, purge=True)
if "php7.3-fpm" in apt_packages:
WOAptGet.remove(self, ['php7.3-fpm'],
auto=False, purge=True)
if "php7.4-fpm" in apt_packages:
WOAptGet.remove(self, ['php7.4-fpm'],
auto=False, purge=True)
# check if nginx upgrade is blocked
if os.path.isfile(
'/etc/apt/preferences.d/nginx-block'):
post_pref(self, WOVar.wo_nginx, [], True)
# upgrade packages
WOAptGet.install(self, apt_packages)
Log.wait(self, "Configuring APT Packages")
post_pref(self, apt_packages, [], True)
if "mariadb-server" in apt_packages:
WOShellExec.cmd_exec(self, 'mysql_upgrade')
Log.valide(self, "Configuring APT Packages")
# Post Actions after package updates
if (packages):
if WOAptGet.is_selected(self, 'WP-CLI', packages):
WOFileUtils.rm(self, '/usr/local/bin/wp')
if WOAptGet.is_selected(self, 'Netdata', packages):
WOFileUtils.rm(self, '/var/lib/wo/tmp/kickstart.sh')
if WOAptGet.is_selected(self, 'ngxblocker', packages):
WOFileUtils.rm(self, '/usr/local/sbin/update-ngxblocker')
if WOAptGet.is_selected(self, 'WordOps Dashboard', packages):
if os.path.isfile('/var/www/22222/htdocs/index.php'):
WOFileUtils.rm(self, '/var/www/22222/htdocs/index.php')
if os.path.isfile('/var/www/22222/htdocs/index.html'):
WOFileUtils.rm(
self, '/var/www/22222/htdocs/index.html')
Log.debug(self, "Downloading following: {0}".format(packages))
WODownload.download(self, packages)
if WOAptGet.is_selected(self, 'WP-CLI', packages):
WOFileUtils.chmod(self, "/usr/local/bin/wp", 0o775)
if WOAptGet.is_selected(self, 'ngxblocker', packages):
if os.path.exists('/etc/nginx/conf.d/variables-hash.conf'):
WOFileUtils.rm(
self, '/etc/nginx/conf.d/variables-hash.conf')
WOFileUtils.chmod(
self, '/usr/local/sbin/update-ngxblocker', 0o775)
WOShellExec.cmd_exec(
self, '/usr/local/sbin/update-ngxblocker -nq')
if WOAptGet.is_selected(self, 'MySQLTuner', packages):
WOFileUtils.chmod(self, "/usr/bin/mysqltuner", 0o775)
if os.path.exists('/usr/local/bin/mysqltuner'):
WOFileUtils.rm(self, '/usr/local/bin/mysqltuner')
# Netdata
if WOAptGet.is_selected(self, 'Netdata', packages):
WOService.stop_service(self, 'netdata')
Log.wait(self, "Upgrading Netdata")
# detect static binaries install
WOShellExec.cmd_exec(
self,
"bash /var/lib/wo/tmp/kickstart.sh "
"--dont-wait --no-updates",
errormsg='', log=False)
Log.valide(self, "Upgrading Netdata")
if WOAptGet.is_selected(self, 'WordOps Dashboard', packages):
post_pref(
self, [], [["https://github.com/WordOps"
"/wordops-dashboard/"
"releases/download/v{0}/"
"wordops-dashboard.tar.gz"
.format(WOVar.wo_dashboard),
"/var/lib/wo/tmp/wo-dashboard.tar.gz",
"WordOps Dashboard"]])
if WOAptGet.is_selected(self, 'Composer', packages):
Log.wait(self, "Upgrading Composer")
if WOShellExec.cmd_exec(
self, '/usr/bin/php -v'):
WOShellExec.cmd_exec(
self, "php -q /var/lib/wo"
"/tmp/composer-install "
"--install-dir=/var/lib/wo/tmp/")
shutil.copyfile('/var/lib/wo/tmp/composer.phar',
'/usr/local/bin/composer')
WOFileUtils.chmod(self, "/usr/local/bin/composer", 0o775)
Log.valide(self, "Upgrading Composer ")
if WOAptGet.is_selected(self, 'PHPMyAdmin', packages):
Log.wait(self, "Upgrading phpMyAdmin")
WOExtract.extract(self, '/var/lib/wo/tmp/pma.tar.gz',
'/var/lib/wo/tmp/')
shutil.copyfile(('{0}22222/htdocs/db/pma'
'/config.inc.php'
.format(WOVar.wo_webroot)),
('/var/lib/wo/tmp/phpMyAdmin-{0}'
'-all-languages/config.inc.php'
.format(wo_phpmyadmin))
)
WOFileUtils.rm(self, '{0}22222/htdocs/db/pma'
.format(WOVar.wo_webroot))
shutil.move('/var/lib/wo/tmp/phpMyAdmin-{0}'
'-all-languages/'
.format(wo_phpmyadmin),
'{0}22222/htdocs/db/pma/'
.format(WOVar.wo_webroot))
WOFileUtils.chown(self, "{0}22222/htdocs"
.format(WOVar.wo_webroot),
'www-data',
'www-data', recursive=True)
Log.valide(self, "Upgrading phpMyAdmin")
if os.path.exists('{0}22222/htdocs'.format(WOVar.wo_webroot)):
WOFileUtils.chown(self, "{0}22222/htdocs"
.format(WOVar.wo_webroot),
'www-data',
'www-data', recursive=True)
Log.info(self, "Successfully updated packages") | Log.info(self, "Updating Nginx templates") |
io_fs.py
# Dmitry Kisler © 2020-present
# www.dkisler.com
from gzip import open as gzip_open
from typing import Tuple, Union, Any
import pickle
import json
def corpus_reader(path: str,
from_memory: bool = False) -> Union[Tuple[str, None],
Tuple[None, str]]:
"""Function to read corpus text file.
Args:
path: Path to the file.
from_memory: To "read" from memory
Returns:
Corpus text and error string in case of any.
Raises:
IOError: Occurred on reading/unpacking error.
"""
try:
if from_memory:
return path.read(), None
else:
if path.endswith(".gz"):
with gzip_open(path, 'rb') as f:
return f.read(), None
else:
with open(path, 'r', encoding='utf-8') as f:
return f.read(), None
except IOError as ex:
return None, ex
def prediction_writer(obj: dict,
path: str) -> None:
" |
def load_obj_pkl(path: str) -> Any:
"""Function to load and deserialize object from pickled file.
Args:
path: Path to object.
Returns:
Deserialized/un-pickled object.
    Raises:
IOError, pickle.UnpicklingError: Occurred when loading/deserializing the obj.
"""
try:
with open(path, 'rb') as f:
return pickle.load(f)
except IOError as ex:
raise ex
except pickle.UnpicklingError as ex:
raise ex
def save_obj_pkl(obj: Any,
path: str) -> None:
"""Function to serialize and save the object as pickled file.
Args:
obj: Python object to pickle.
path: Path to store to.
Raises:
IOError, pickle.PicklingError: Occurred on writing/pickling error.
"""
try:
with open(path, 'wb') as f:
pickle.dump(obj, f)
except IOError as ex:
raise ex
except pickle.PicklingError as ex:
raise ex
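# Illustrative round-trip (an addition, not part of the original module; the
# /tmp file names are invented):
if __name__ == '__main__':
    prediction_writer({'label': 'spam', 'score': 0.93}, '/tmp/pred.json')
    save_obj_pkl([1, 2, 3], '/tmp/obj.pkl')
    assert load_obj_pkl('/tmp/obj.pkl') == [1, 2, 3]
    text, err = corpus_reader('/tmp/missing.txt')  # err is an IOError here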
| ""Function to write prediction as json file.
Args:
obj: Prediction object to write to.
path: Path to the file.
Raises:
IOError: Occurred on writing error.
"""
try:
if path.endswith(".gz"):
with gzip_open(path, 'wb') as f:
f.write(json.dumps(obj).encode('utf-8'))
else:
with open(path, 'w') as f:
json.dump(obj, f)
except IOError as ex:
raise ex
|
Sum_of_bit_differences.py
t = int(input())
# Python program to compute sum of pairwise bit differences
def sumBitDifferences(arr, n):
ans = 0 # Initialize result
# traverse over all bits
for i in range(0, 32):
# count number of elements with i'th bit set
count = 0
for j in range(0,n):
            if arr[j] & (1 << i):
count+=1
# Add "count * (n - count) * 2" to the answer
        ans += count * (n - count) * 2
return ans
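# Worked check (illustrative): for arr = [1, 2] (binary 01 and 10), bit 0 is
# set in exactly one of the two elements, contributing 1 * (2 - 1) * 2 = 2;
# bit 1 contributes the same, so sumBitDifferences([1, 2], 2) == 4, i.e. the
# two differing bits counted over both ordered pairs (1, 2) and (2, 1).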
for _ in range(t):
n = int(input())
l = list(map(int,input().split()))
    print(sumBitDifferences(l, n))