Dataset schema (column, dtype, value stats):

| column | dtype | stats |
| --- | --- | --- |
| instance_id | string | length 10 to 57 |
| patch | string | length 261 to 37.7k |
| repo | string | length 7 to 53 |
| base_commit | string | length 40 |
| hints_text | string | 301 distinct values |
| test_patch | string | length 212 to 2.22M |
| problem_statement | string | length 23 to 37.7k |
| version | string | 1 distinct value |
| environment_setup_commit | string | length 40 |
| FAIL_TO_PASS | list | 1 to 4.94k items |
| PASS_TO_PASS | list | 0 to 7.82k items |
| meta | dict | |
| created_at | string | length 25 |
| license | string | 8 distinct values |
| __index_level_0__ | int64 | 0 to 6.41k |

instance_id: physiopy__phys2bids-189
patch:
diff --git a/phys2bids/cli/run.py b/phys2bids/cli/run.py index a0ffbd3..8b9118c 100644 --- a/phys2bids/cli/run.py +++ b/phys2bids/cli/run.py @@ -110,8 +110,10 @@ def _get_parser(): dest='thr', type=float, help='Threshold to use for trigger detection. ' - 'Default is 2.5.', - default=2.5) + 'If "ntp" and "TR" are specified, phys2bids automatically computes ' + 'a threshold to detect the triggers. Use this parameter to set it ' + 'manually', + default=None) optional.add_argument('-chnames', '--channel-names', dest='ch_name', nargs='*', diff --git a/phys2bids/phys2bids.py b/phys2bids/phys2bids.py index bed7050..4bf09d1 100644 --- a/phys2bids/phys2bids.py +++ b/phys2bids/phys2bids.py @@ -176,7 +176,7 @@ def use_heuristic(heur_file, sub, ses, filename, outdir, record_label=''): def phys2bids(filename, info=False, indir='.', outdir='.', heur_file=None, sub=None, ses=None, chtrig=0, chsel=None, num_timepoints_expected=0, - tr=1, thr=2.5, ch_name=[], chplot='', debug=False, quiet=False): + tr=1, thr=None, ch_name=[], chplot='', debug=False, quiet=False): """ Main workflow of phys2bids. Runs the parser, does some checks on input, then imports @@ -259,13 +259,12 @@ def phys2bids(filename, info=False, indir='.', outdir='.', heur_file=None, if info: return - # Run analysis on trigger channel to get first timepoint and the time offset. - # #!# Get option of no trigger! (which is wrong practice or Respiract) - phys_in.check_trigger_amount(chtrig, thr, num_timepoints_expected, tr) - # Create trigger plot. If possible, to have multiple outputs in the same # place, adds sub and ses label. - if tr != 0 or num_timepoints_expected != 0: + if tr != 0 and num_timepoints_expected != 0: + # Run analysis on trigger channel to get first timepoint and the time offset. + # #!# Get option of no trigger! (which is wrong practice or Respiract) + phys_in.check_trigger_amount(chtrig, thr, num_timepoints_expected, tr) LGR.info('Plot trigger') plot_path = os.path.join(outdir, os.path.splitext(os.path.basename(filename))[0]) @@ -274,7 +273,7 @@ def phys2bids(filename, info=False, indir='.', outdir='.', heur_file=None, if ses: plot_path += f'_ses-{ses}' viz.plot_trigger(phys_in.timeseries[0], phys_in.timeseries[chtrig], - plot_path, tr, thr, num_timepoints_expected, filename) + plot_path, tr, phys_in.thr, num_timepoints_expected, filename) else: LGR.info('Not plotting trigger. If you want the trigger to be' ' plotted enter -tr or -ntp, preferably both.') @@ -348,7 +347,7 @@ def phys2bids(filename, info=False, indir='.', outdir='.', heur_file=None, outfile = f'{outfile}_{uniq_freq}' LGR.info(f'Exporting files for freq {uniq_freq}') - savetxt(outfile + '.tsv.gz', phys_out[uniq_freq].timeseries.T, + savetxt(outfile + '.tsv.gz', phys_out[uniq_freq].timeseries, fmt='%.8e', delimiter='\t') print_json(outfile, phys_out[uniq_freq].freq, phys_out[uniq_freq].start_time, diff --git a/phys2bids/physio_obj.py b/phys2bids/physio_obj.py index 15fa8df..016872f 100644 --- a/phys2bids/physio_obj.py +++ b/phys2bids/physio_obj.py @@ -6,6 +6,7 @@ I/O objects for phys2bids. """ import logging +from itertools import groupby import numpy as np @@ -101,9 +102,12 @@ class BlueprintInput(): in the output files. units : (ch) list of strings List of the units of the channels. - num_timepoints_found: int + num_timepoints_found: int or None Amount of timepoints found in the automatic count. 
- This is computed internally, *if* check_trigger_amount() is run + This is initialised as "None" and then computed internally, + *if* check_trigger_amount() is run + thr: float + Threshold used by check_trigger_amount() to detect trigger points. Methods ------- @@ -144,6 +148,7 @@ class BlueprintInput(): self.ch_amount, 0.0) self.ch_name = has_size(ch_name, self.ch_amount, 'unknown') self.units = has_size(units, self.ch_amount, '[]') + self.num_timepoints_found = None @property def ch_amount(self): @@ -232,7 +237,7 @@ class BlueprintInput(): del self.ch_name[idx] del self.units[idx] - def check_trigger_amount(self, chtrig=1, thr=2.5, num_timepoints_expected=0, tr=0): + def check_trigger_amount(self, chtrig=1, thr=None, num_timepoints_expected=0, tr=0): """ Counts trigger points and corrects time offset in the list representing time. @@ -250,6 +255,8 @@ class BlueprintInput(): Notes ----- Outcome: + self.thr: float + Threshold used by the function to detect trigger points. self.num_timepoints_found: int Property of the `BlueprintInput` class. Contains the number of timepoints found @@ -259,11 +266,16 @@ class BlueprintInput(): the time of first trigger. """ LGR.info('Counting trigger points') - # Use first derivative of the trigger channel to find the TRs, + # Use the trigger channel to find the TRs, # comparing it to a given threshold. - trigger_deriv = np.diff(self.timeseries[chtrig]) - timepoints = trigger_deriv > thr - num_timepoints_found = timepoints.sum() + trigger = self.timeseries[chtrig] + if thr is None: + thr = np.mean(trigger) + 2 * np.std(trigger) + timepoints = trigger > thr + num_timepoints_found = len([is_true for is_true, _ in groupby(timepoints, + lambda x: x != 0) if is_true]) + LGR.info(f'The number of timepoints according to the std_thr method ' + f'is {num_timepoints_found}. The computed threshold is {thr}') time_offset = self.timeseries[0][timepoints.argmax()] if num_timepoints_expected: @@ -296,7 +308,7 @@ class BlueprintInput(): else: LGR.warning('The necessary options to find the amount of timepoints ' 'were not provided.') - + self.thr = thr self.timeseries[0] -= time_offset self.num_timepoints_found = num_timepoints_found @@ -379,7 +391,7 @@ class BlueprintOutput(): int Number of channels """ - return len(self.timeseries) + return self.timeseries.shape[1] def return_index(self, idx): """ @@ -397,7 +409,7 @@ class BlueprintOutput(): Tuple containing the proper list entry of all the properties of the object with index `idx` """ - return (self.timeseries[idx], self.ch_amount, self.freq, + return (self.timeseries[:, idx], self.ch_amount, self.freq, self.ch_name[idx], self.units[idx], self.start_time) def delete_at_index(self, idx): @@ -423,7 +435,7 @@ class BlueprintOutput(): In all the property that are lists, the element correspondent to `idx` gets deleted """ - self.timeseries = np.delete(self.timeseries, idx, axis=0) + self.timeseries = np.delete(self.timeseries, idx, axis=1) del self.ch_name[idx] del self.units[idx] @@ -445,7 +457,7 @@ class BlueprintOutput(): cls: :obj: `BlueprintOutput` Populated `BlueprintOutput` object. 
""" - timeseries = np.asarray(blueprint.timeseries) + timeseries = np.asarray(blueprint.timeseries).T freq = blueprint.freq[0] ch_name = blueprint.ch_name units = blueprint.units diff --git a/phys2bids/viz.py b/phys2bids/viz.py index 8d89411..4fb6970 100644 --- a/phys2bids/viz.py +++ b/phys2bids/viz.py @@ -66,7 +66,7 @@ def plot_trigger(time, trigger, fileprefix, tr, thr, num_timepoints_expected, subplot.set_ylabel('Volts') subplot.plot(time, trigger, '-', time, thrline, 'r-.', time, block, '-') subplot.fill_between(time, block, where=block >= d, interpolate=True, color='#ffbb6e') - subplot.legend(["trigger", "input threshold", "time block"], loc='upper right') + subplot.legend(["trigger", "Trigger detection threshold", "time block"], loc='upper right') # plot the first spike according to the user threshold subplot = fig.add_subplot(223) subplot.set_xlim([-tr * 4, tr * 4])

repo: physiopy/phys2bids
base_commit: 9ec9c33da2e7c3adf7653a79cb024ffcc130bf42
test_patch:
diff --git a/phys2bids/tests/test_integration.py b/phys2bids/tests/test_integration.py index 6b8298d..7604484 100644 --- a/phys2bids/tests/test_integration.py +++ b/phys2bids/tests/test_integration.py @@ -126,7 +126,7 @@ def test_integration_acq(samefreq_full_acq_file): # Check sampling frequency assert check_string(log_info, 'Sampling Frequency', '10000.0') # Check sampling started - assert check_string(log_info, 'Sampling started', '10.425007798392297') + assert check_string(log_info, 'Sampling started', '10.425107798467103') # Check start time assert check_string(log_info, 'first trigger', 'Time 0', is_num=False) @@ -136,7 +136,7 @@ def test_integration_acq(samefreq_full_acq_file): # Compares values in json file with ground truth assert math.isclose(json_data['SamplingFrequency'], 10000.0) - assert math.isclose(json_data['StartTime'], 10.425007798392297) + assert math.isclose(json_data['StartTime'], 10.425107798467103) assert json_data['Columns'] == ['time', 'RESP - RSP100C', 'PULSE - Custom, DA100C', 'MR TRIGGER - Custom, HLT100C - A 5', 'PPG100C', 'CO2', 'O2'] @@ -208,7 +208,7 @@ def test_integration_multifreq(multifreq_acq_file): # Check sampling frequency assert check_string(log_info, 'Sampling Frequency', '10000.0') # Check sampling started - assert check_string(log_info, 'Sampling started', '10.425007798392297') + assert check_string(log_info, 'Sampling started', '10.425107798467103') # Check start time assert check_string(log_info, 'first trigger', 'Time 0', is_num=False) @@ -218,7 +218,7 @@ def test_integration_multifreq(multifreq_acq_file): # Compares values in json file with ground truth assert math.isclose(json_data['SamplingFrequency'], 10000.0) - assert math.isclose(json_data['StartTime'], 10.425007798392297) + assert math.isclose(json_data['StartTime'], 10.425107798467103) assert json_data['Columns'] == ['time', 'RESP - RSP100C', 'MR TRIGGER - Custom, HLT100C - A 5', 'PPG100C', 'CO2', 'O2'] diff --git a/phys2bids/tests/test_physio_obj.py b/phys2bids/tests/test_physio_obj.py index 488b374..f5cabaf 100644 --- a/phys2bids/tests/test_physio_obj.py +++ b/phys2bids/tests/test_physio_obj.py @@ -93,7 +93,7 @@ def test_BlueprintOutput(): # Tests init_from_blueprint blueprint_out = po.BlueprintOutput.init_from_blueprint(blueprint_in) start_time = blueprint_out.start_time - assert (blueprint_out.timeseries == test_timeseries).all() + assert (blueprint_out.timeseries == np.asarray(test_timeseries).T).all() assert blueprint_out.freq == test_freq[0] assert blueprint_out.ch_name == test_chn_name assert blueprint_out.units == test_units @@ -102,16 +102,16 @@ def test_BlueprintOutput(): # Tests return_index test_timeseries = np.array([[0, 1, 1, 2, 3, 5, 8, 13], [0, 1, 0, 0, 0, 0, 0, 0], - [1, 0, 0, 1, 0, 0, 1, 0]]) + [1, 0, 0, 1, 0, 0, 1, 0]]).T test_freq = 42.0 test_chn_name = ['trigger', 'time', 'chocolate'] test_units = ['s', 's', 'sweetness'] - num_channnels = len(test_timeseries) + num_channnels = test_timeseries.shape[1] blueprint_out = po.BlueprintOutput(test_timeseries, test_freq, test_chn_name, test_units, start_time) test_index = blueprint_out.return_index(1) assert (test_index[0] == test_trigger).all() - assert test_index[1] == len(test_timeseries) + assert test_index[1] == test_timeseries.shape[1] assert test_index[3] == test_chn_name[1] assert test_index[4] == test_units[1] @@ -119,5 +119,5 @@ def test_BlueprintOutput(): blueprint_out.delete_at_index(1) assert len(blueprint_out.ch_name) == num_channnels - 1 assert len(blueprint_out.units) == num_channnels - 1 - assert 
blueprint_out.timeseries.shape[0] == num_channnels - 1 + assert blueprint_out.timeseries.shape[1] == num_channnels - 1 assert blueprint_out.ch_amount == num_channnels - 1

problem_statement: Output files are not BIDS compliant

## Expected Behavior

Following [BIDS 1.2.2](https://bids-specification.readthedocs.io/en/v1.2.2/), the `.tsv.gz` files should have a column for each channel.

## Actual Behavior

Our output has a row for each channel.

## Possible solution

We can solve this in two ways:
1. a very easy, very quick transpose of `BlueprintOutput.timeseries` before (or during) the final export to `tsv`, or
2. a less easy, less quick inversion of the `BlueprintOutput.timeseries` indexes, followed by adaptation of the scripts, so that `BlueprintOutput.timeseries` is always created and managed as if we were importing a BIDS compliant `tsv` file.

I prefer option 2: it could be a bit less easy, but it will work very well in case we need to import files, or in case this object is used by other libraries that only import BIDS compliant files. But I'd like to know what y'all think (starting with code contributors @vinferrer @rmarkello @eurunuela)

(thanks to @sangfrois for pointing it out)
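A quick illustration of the two layouts at stake, with hypothetical channel values; only the `savetxt` call mirrors the patch above:

```python
import numpy as np

# Old BlueprintOutput layout: one *row* per channel.
timeseries = np.array([[0, 1, 1, 2, 3],    # time
                       [1, 0, 0, 1, 0],    # trigger
                       [9, 8, 7, 6, 5]])   # a recorded channel
print(timeseries.shape)    # (3, 5): 3 rows, one per channel
print(timeseries.T.shape)  # (5, 3): 3 columns, one per channel (BIDS layout)

# Option 1 from the issue: transpose only at export time.
np.savetxt('physio.tsv.gz', timeseries.T, fmt='%.8e', delimiter='\t')
```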

version: 0.0
environment_setup_commit: 9ec9c33da2e7c3adf7653a79cb024ffcc130bf42
FAIL_TO_PASS: [ "phys2bids/tests/test_integration.py::test_integration_acq", "phys2bids/tests/test_integration.py::test_integration_multifreq", "phys2bids/tests/test_physio_obj.py::test_BlueprintOutput" ]
PASS_TO_PASS: [ "phys2bids/tests/test_integration.py::test_logger", "phys2bids/tests/test_integration.py::test_integration_tutorial", "phys2bids/tests/test_integration.py::test_integration_heuristic", "phys2bids/tests/test_integration.py::test_integration_info", "phys2bids/tests/test_physio_obj.py::test_is_valid", "phys2bids/tests/test_physio_obj.py::test_has_size", "phys2bids/tests/test_physio_obj.py::test_BlueprintInput" ]
meta: { "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
created_at: 2020-03-26 18:39:46+00:00
license: apache-2.0
__index_level_0__: 4535

instance_id: physiopy__phys2bids-193
patch:
diff --git a/phys2bids/phys2bids.py b/phys2bids/phys2bids.py index 4bf09d1..c3b3ec7 100644 --- a/phys2bids/phys2bids.py +++ b/phys2bids/phys2bids.py @@ -254,7 +254,8 @@ def phys2bids(filename, info=False, indir='.', outdir='.', heur_file=None, phys_in.print_info(filename) # #!# Here the function viz.plot_channel should be called if chplot != '' or info: - viz.plot_all(phys_in, infile, chplot) + viz.plot_all(phys_in.ch_name, phys_in.timeseries, phys_in.units, + phys_in.freq, infile, chplot) # If only info were asked, end here. if info: return diff --git a/phys2bids/viz.py b/phys2bids/viz.py index 4fb6970..d925f6c 100644 --- a/phys2bids/viz.py +++ b/phys2bids/viz.py @@ -14,8 +14,12 @@ FIGSIZE = (18, 10) def plot_trigger(time, trigger, fileprefix, tr, thr, num_timepoints_expected, filename, figsize=FIGSIZE, dpi=SET_DPI): """ - Produces a textfile of the specified extension `ext`, - containing the given content `text`. + Produces a figure with three plots: + 1. Plots the triggers in blue, a block in orange that indicates + the time from the first trigger to the last, and a red line showing + the threshold used for trigger detection + 2. Same plot but showing only the intial trigger + 3. Same plot but showing only the intial trigger Parameters ---------- @@ -26,15 +30,19 @@ def plot_trigger(time, trigger, fileprefix, tr, thr, num_timepoints_expected, fileprefix: str or path A string representing a file name or a fullpath to a file, WITHOUT extension + tr: float + Repetition time + thr: float + Threshold used to detect the number of triggers + num_timepoints_expected: int + Number of timepoints expected by the user filename: string name of the original file - options: argparse object - The object produced by `get_parser` in `cli.run.py` - figsize: tuple - Desired size of the figure (see `matplotlib`), + figsize: tuple or list of floats + Size of the figure expressed as (size_x, size_y), Default is {FIGSIZE} dpi: int - Desired DPI of the figure (see `matplotlib`), + Desired DPI of the figure, Default is {SET_DPI} Notes @@ -42,6 +50,11 @@ def plot_trigger(time, trigger, fileprefix, tr, thr, num_timepoints_expected, Outcome: fileprefix + _trigger_time.png: Creates new plot `fileprefix_trigger_time.png`. + + See Also + -------- + https://phys2bids.readthedocs.io/en/latest/howto.html + matplotlib.pyploy.figsize """ def time2ntr(x): @@ -114,19 +127,55 @@ def plot_trigger(time, trigger, fileprefix, tr, thr, num_timepoints_expected, plt.close() -def plot_all(phys_in, infile, outfile='', dpi=SET_DPI, size=FIGSIZE): - ch_num = len(phys_in.ch_name) # get number of channels: +def plot_all(ch_name, timeseries, units, freq, infile, outfile='', dpi=SET_DPI, size=FIGSIZE): + """ + Plots all the channels for visualizations and saves them in outfile + + Parameters + ---------- + ch_name: (ch) list of strings + List of names of the channels - can be the header of the columns + in the output files. + timeseries: (ch, [tps]) list + List of numpy 1d arrays - one for channel, plus one for time. + Time channel has to be the first, trigger the second. + Contains all the timeseries recorded. + units: (ch) list of strings + List of the units of the channels. + freq: (ch) list of floats + List of floats - one per channel. + Contains all the frequencies of the recorded channel. 
+ infile: string + name of the input file to phys2bids + outfile: string + path of the output plot + dpi: int + Desired DPI of the figure, + Default is {SET_DPI} + figsize: tuple or list of floats + Size of the figure expressed as (size_x, size_y), + Default is {FIGSIZE} + ----- + outcome: + Creates new plot with path specified in outfile. + + See Also + -------- + https://phys2bids.readthedocs.io/en/latest/howto.html + matplotlib.pyploy.figsize + """ + ch_num = len(ch_name) # get number of channels: fig, ax = plt.subplots(ch_num - 1, 1, figsize=size, sharex=True) - time = phys_in.timeseries[0] # assume time is first channel + time = timeseries[0] # assume time is first channel fig.suptitle(os.path.basename(infile)) - for row, timeser in enumerate(phys_in.timeseries[1:]): + for row, timeser in enumerate(timeseries[1:]): if timeser.shape != time.shape: time_old = np.linspace(0, time[-1], num=timeser.shape[0]) timeser = np.interp(time, time_old, timeser) ax[row].plot(time, timeser) - ax[row].set_title(f' Channel {row + 1}: {phys_in.ch_name[row + 1]}') - ax[row].set_ylabel(phys_in.units[row + 1]) - ax[row].xlim = 30 * 60 * phys_in.freq[0] # maximum display of half an hour + ax[row].set_title(f' Channel {row + 1}: {ch_name[row + 1]}') + ax[row].set_ylabel(units[row + 1]) + ax[row].xlim = 30 * 60 * freq[0] # maximum display of half an hour ax[row].grid() ax[row].set_xlabel("seconds") if outfile == '':

repo: physiopy/phys2bids
base_commit: 07bec6aa41d2cc1a2da4383b8dcf112bf9775734
test_patch:
diff --git a/phys2bids/tests/test_viz.py b/phys2bids/tests/test_viz.py index edc8132..65b444a 100644 --- a/phys2bids/tests/test_viz.py +++ b/phys2bids/tests/test_viz.py @@ -9,7 +9,8 @@ def test_plot_all(samefreq_full_acq_file): test_path, test_filename = os.path.split(samefreq_full_acq_file) phys_obj = acq.populate_phys_input(samefreq_full_acq_file, chtrig) out = os.path.join(test_path, 'Test_belt_pulse_samefreq.png') - viz.plot_all(phys_obj, test_filename, outfile=out) + viz.plot_all(phys_obj.ch_name, phys_obj.timeseries, phys_obj.units, + phys_obj.freq, test_filename, outfile=out) assert os.path.isfile(out)

problem_statement: Update docstrings in viz.py

## Detailed Description

At the moment, the docstrings of the functions in `viz.py` are either outdated (after #153) or missing (for `plot_all`).

## Context / Motivation

We should update the docstrings for both users and developers.

## Possible Implementation

@vinferrer, as you are the main author of the `viz.py` functions, could you tackle this issue?
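For reference, the layout the patch adopts is numpydoc-style; a minimal sketch on a made-up function (not part of the phys2bids API):

```python
def plot_channel(time, signal, outfile='', dpi=100):
    """
    Plot one physiological channel against time.

    Parameters
    ----------
    time: (tps) numpy 1d array
        Timestamps of each sample, in seconds.
    signal: (tps) numpy 1d array
        Samples of the channel to plot.
    outfile: string
        Path of the output plot. If empty, the plot is shown instead.
    dpi: int
        Desired DPI of the figure,
        Default is 100

    Notes
    -----
    Outcome:
    Creates a new plot at the path specified in outfile.
    """
```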

version: 0.0
environment_setup_commit: 07bec6aa41d2cc1a2da4383b8dcf112bf9775734
FAIL_TO_PASS: [ "phys2bids/tests/test_viz.py::test_plot_all" ]
PASS_TO_PASS: [ "phys2bids/tests/test_viz.py::test_plot_trigger" ]
meta: { "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
created_at: 2020-03-30 15:18:24+00:00
license: apache-2.0
__index_level_0__: 4536

instance_id: physiopy__phys2bids-212
patch:
diff --git a/phys2bids/phys2bids.py b/phys2bids/phys2bids.py index 138a512..593b0e4 100644 --- a/phys2bids/phys2bids.py +++ b/phys2bids/phys2bids.py @@ -76,7 +76,7 @@ def print_summary(filename, ntp_expected, ntp_found, samp_freq, time_offset, out f'Timepoints expected: {ntp_expected}\n' f'Timepoints found: {ntp_found}\n' f'Sampling Frequency: {samp_freq} Hz\n' - f'Sampling started at: {start_time} s\n' + f'Sampling started at: {start_time:.4f} s\n' f'Tip: Time 0 is the time of first trigger\n' f'------------------------------------------------\n') LGR.info(summary) @@ -106,7 +106,7 @@ def print_json(outfile, samp_freq, time_offset, ch_name): """ start_time = -time_offset summary = dict(SamplingFrequency=samp_freq, - StartTime=start_time, + StartTime=round(start_time, 4), Columns=ch_name) utils.writejson(outfile, summary, indent=4, sort_keys=False) diff --git a/phys2bids/physio_obj.py b/phys2bids/physio_obj.py index 1dfe3c3..3eb1ce1 100644 --- a/phys2bids/physio_obj.py +++ b/phys2bids/physio_obj.py @@ -459,9 +459,9 @@ class BlueprintInput(): lambda x: x != 0) if is_true]) if flag == 1: LGR.info(f'The number of timepoints according to the std_thr method ' - f'is {num_timepoints_found}. The computed threshold is {thr}') + f'is {num_timepoints_found}. The computed threshold is {thr:.4f}') else: - LGR.info(f'The number of timepoints found with the manual threshold of {thr} ' + LGR.info(f'The number of timepoints found with the manual threshold of {thr:.4f} ' f'is {num_timepoints_found}') time_offset = self.timeseries[0][timepoints.argmax()]

repo: physiopy/phys2bids
base_commit: f294930242820b541e67b153c6344b10938b7668
test_patch:
diff --git a/phys2bids/tests/test_integration.py b/phys2bids/tests/test_integration.py index 232e8a0..1ce893b 100644 --- a/phys2bids/tests/test_integration.py +++ b/phys2bids/tests/test_integration.py @@ -82,7 +82,7 @@ def test_integration_tutorial(): # Check sampling frequency assert check_string(log_info, 'Sampling Frequency', '1000.0') # Check sampling frequency - assert check_string(log_info, 'Sampling started', '0.24499999999989086') + assert check_string(log_info, 'Sampling started', '0.2450') # Check start time assert check_string(log_info, 'first trigger', 'Time 0', is_num=False) @@ -92,7 +92,7 @@ def test_integration_tutorial(): # Compares values in json file with ground truth assert math.isclose(json_data['SamplingFrequency'], 1000.0) - assert math.isclose(json_data['StartTime'], 0.245) + assert math.isclose(json_data['StartTime'], 0.2450) assert json_data['Columns'] == ['time', 'Trigger', 'CO2', 'O2', 'Pulse'] # Remove generated files @@ -126,7 +126,7 @@ def test_integration_acq(samefreq_full_acq_file): # Check sampling frequency assert check_string(log_info, 'Sampling Frequency', '10000.0') # Check sampling started - assert check_string(log_info, 'Sampling started', '10.425107798467103') + assert check_string(log_info, 'Sampling started', '10.4251') # Check start time assert check_string(log_info, 'first trigger', 'Time 0', is_num=False) @@ -136,7 +136,7 @@ def test_integration_acq(samefreq_full_acq_file): # Compares values in json file with ground truth assert math.isclose(json_data['SamplingFrequency'], 10000.0) - assert math.isclose(json_data['StartTime'], 10.425107798467103) + assert math.isclose(json_data['StartTime'], 10.4251) assert json_data['Columns'] == ['time', 'RESP - RSP100C', 'PULSE - Custom, DA100C', 'MR TRIGGER - Custom, HLT100C - A 5', 'PPG100C', 'CO2', 'O2'] @@ -181,7 +181,7 @@ def test_integration_multifreq(multifreq_acq_file): # Check sampling frequency assert check_string(log_info, 'Sampling Frequency', '625.0') # Check sampling frequency - assert check_string(log_info, 'Sampling started', '0.29052734375') + assert check_string(log_info, 'Sampling started', '0.2905') # Check start time assert check_string(log_info, 'first trigger', 'Time 0', is_num=False) @@ -191,7 +191,7 @@ def test_integration_multifreq(multifreq_acq_file): # Compares values in json file with ground truth assert math.isclose(json_data['SamplingFrequency'], 625.0) - assert math.isclose(json_data['StartTime'], 0.29052734375) + assert math.isclose(json_data['StartTime'], 0.2905) assert json_data['Columns'] == ['PULSE - Custom, DA100C'] """ @@ -208,7 +208,7 @@ def test_integration_multifreq(multifreq_acq_file): # Check sampling frequency assert check_string(log_info, 'Sampling Frequency', '10000.0') # Check sampling started - assert check_string(log_info, 'Sampling started', '10.425107798467103') + assert check_string(log_info, 'Sampling started', '10.4251') # Check start time assert check_string(log_info, 'first trigger', 'Time 0', is_num=False) @@ -218,7 +218,7 @@ def test_integration_multifreq(multifreq_acq_file): # Compares values in json file with ground truth assert math.isclose(json_data['SamplingFrequency'], 10000.0) - assert math.isclose(json_data['StartTime'], 10.425107798467103) + assert math.isclose(json_data['StartTime'], 10.4251) assert json_data['Columns'] == ['time', 'RESP - RSP100C', 'MR TRIGGER - Custom, HLT100C - A 5', 'PPG100C', 'CO2', 'O2'] @@ -266,7 +266,7 @@ def test_integration_heuristic(): # Check sampling frequency assert check_string(log_info, 'Sampling Frequency', 
'1000.0') # Check sampling started - assert check_string(log_info, 'Sampling started', '0.24499999999989086') + assert check_string(log_info, 'Sampling started', '0.2450') # Check start time assert check_string(log_info, 'first trigger', 'Time 0', is_num=False) @@ -277,7 +277,7 @@ def test_integration_heuristic(): # Compares values in json file with ground truth assert math.isclose(json_data['SamplingFrequency'], 1000.0) - assert math.isclose(json_data['StartTime'], 0.24499999999989086) + assert math.isclose(json_data['StartTime'], 0.2450) assert json_data['Columns'] == ['time', 'Trigger', 'CO2', 'O2', 'Pulse'] # Remove generated files

problem_statement: Change floats print format

Currently, any float printed to a file carries many decimals. It would be better to format these prints with at most 4 decimals. This doesn't affect the data, only the information in the json and in the log.
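The merged fix applies the formatting only at the print sites, so stored values keep full precision; a minimal demonstration of the two mechanisms it uses:

```python
start_time = 0.24499999999989086

# Log line: the f-string format spec truncates only the *display* to 4 decimals.
print(f'Sampling started at: {start_time:.4f} s')  # Sampling started at: 0.2450 s

# json value: round() truncates the number actually written out.
print(round(start_time, 4))  # 0.245
```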

version: 0.0
environment_setup_commit: f294930242820b541e67b153c6344b10938b7668
FAIL_TO_PASS: [ "phys2bids/tests/test_integration.py::test_integration_tutorial", "phys2bids/tests/test_integration.py::test_integration_heuristic", "phys2bids/tests/test_integration.py::test_integration_info" ]
PASS_TO_PASS: [ "phys2bids/tests/test_integration.py::test_logger" ]
meta: { "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
created_at: 2020-04-16 20:21:48+00:00
license: apache-2.0
__index_level_0__: 4537

instance_id: physiopy__phys2bids-222
patch:
diff --git a/phys2bids/bids_units.py b/phys2bids/bids_units.py new file mode 100644 index 0000000..b1eb671 --- /dev/null +++ b/phys2bids/bids_units.py @@ -0,0 +1,84 @@ +import logging + +LGR = logging.getLogger(__name__) + +unit_aliases = { + # kelvin: thermodynamic temperature + 'k': 'K', 'kelvin': 'K', 'kelvins': 'K', + # mole: amount of substance + 'mol': 'mol', 'mole': 'mol', + # newton: force, weight + 'newton': 'N', 'newtons': 'N', 'n': 'N', + # pascal: pressure, stress + 'pascal': 'Pa', 'pascals': 'Pa', 'pa': 'Pa', + # volt: voltage (electrical potential), emf + 'v': 'V', 'volt': 'V', 'volts': 'V', + # degree Celsius: temperature relative to 273.15 K + '°c': '°C', '°celsius': '°C', 'celsius': '°C', + # ampere: electric current + 'a': 'A', 'ampere': 'A', 'amp': 'A', 'amps': 'A', + # second: time and hertzs + '1/hz': 's', '1/hertz': 's', 'hz': 'Hz', + '1/s': 'Hz', '1/second': 'Hz', '1/seconds': 'Hz', + '1/sec': 'Hz', '1/secs': 'Hz', 'hertz': 'Hz', + 'second': 's', 'seconds': 's', 'sec': 's', + 'secs': 's', 's': 's', +} + +# Init dictionary of aliases for multipliers. Entries are still lowercase +prefix_aliases = { + # Multiples - skip "mega" and only up to "tera" + 'da': 'da', 'deca': 'da', 'h': 'h', 'hecto': 'h', + 'k': 'k', 'kilo': 'k', 'g': 'G', 'giga': 'G', 't': 'T', + 'tera': 'T', + # Submultipliers + 'd': 'd', 'deci': 'd', 'c': 'c', 'centi': 'c', + 'milli': 'm', 'm': 'm', 'µ': 'µ', 'micro': 'µ', + 'n': 'n', 'nano': 'n', 'p': 'p', 'pico': 'p', + 'f': 'f', 'femto': 'f', 'a': 'a', 'atto': 'a', + 'z': 'z', 'zepto': 'z', 'y': 'y', 'yocto': 'y', +} + + +def bidsify_units(orig_unit): + """ + Read the input unit of measure and use the dictionary of aliases + to bidsify its value. + It is possible to make simple conversions + + Parameters + ---------- + unit: string + Unit of measure, might or might not be BIDS compliant. + + Returns + ------- + new_unit: str + BIDSified alias of input unit + + Notes + ----- + This function should implement a double check, one for unit and + the other for prefixes (e.g. "milli"). However, that is going to be tricky, + unless there is a weird way to multiply two dictionaries together. 
+ """ + # call prefix and unit dicts + # for every unit alias in the dict + orig_unit = orig_unit.lower() + for u_key in unit_aliases.keys(): + if orig_unit.endswith(u_key): + new_unit = unit_aliases[u_key] + unit = orig_unit[:-len(u_key)] + if unit != '': + # for every prefix alias + prefix = prefix_aliases.get(unit, '') + if prefix == '': + LGR.warning(f'The given unit prefix {unit} does not have aliases, ' + f'passing it as is') + prefix = orig_unit[:len(unit)] + return prefix + new_unit + else: + return new_unit + LGR.warning(f'The given unit {orig_unit} does not have aliases, ' + f'passing it as is') + return orig_unit diff --git a/phys2bids/phys2bids.py b/phys2bids/phys2bids.py index e57bcd3..138a512 100644 --- a/phys2bids/phys2bids.py +++ b/phys2bids/phys2bids.py @@ -37,6 +37,7 @@ from numpy import savetxt from phys2bids import utils, viz, _version from phys2bids.cli.run import _get_parser from phys2bids.physio_obj import BlueprintOutput +from phys2bids.bids_units import bidsify_units LGR = logging.getLogger(__name__) @@ -265,6 +266,8 @@ def phys2bids(filename, info=False, indir='.', outdir='.', heur_file=None, LGR.info(f'Reading the file {infile}') phys_in = populate_phys_input(infile, chtrig) + for index, unit in enumerate(phys_in.units): + phys_in.units[index] = bidsify_units(unit) LGR.info('Reading infos') phys_in.print_info(filename) # #!# Here the function viz.plot_channel should be called

repo: physiopy/phys2bids
base_commit: d7ffcb0c36b683f721524a7a762d8c2f8cdb000c
test_patch:
diff --git a/phys2bids/tests/test_bids_units.py b/phys2bids/tests/test_bids_units.py
new file mode 100644
index 0000000..4cb58a5
--- /dev/null
+++ b/phys2bids/tests/test_bids_units.py
@@ -0,0 +1,14 @@
+from phys2bids.bids_units import bidsify_units
+from phys2bids.bids_units import unit_aliases
+
+
+def test_bidsify_units():
+    # test unit with standard prefix
+    assert bidsify_units('centik') == 'cK'
+    # test unit with not standard prefix
+    assert bidsify_units('matV') == 'matV'
+    # test unit that's not bids standard
+    assert bidsify_units('mmlie') == 'mmlie'
+    # test there is not problem with every unit in the dict
+    for unit_key in unit_aliases.keys():
+        assert bidsify_units(unit_key) == unit_aliases[unit_key]

problem_statement: Adhering to BIDS units standards

## Detailed Description

[BIDS uses a standard format for units of measure too](https://bids-specification.readthedocs.io/en/stable/99-appendices/05-units.html). Our files have different ways of reporting units, but we should treat them with phys2bids!

## Possible Implementation

A possible implementation is adding a dictionary and a function, either in the main script or in a separate file, that gets called toward the end of the `_main()` function, before exporting the `json` file. The dictionary contains the different possible homonyms for the units, and the function reads what units our objects contain and changes them according to the dictionary.
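The implemented `bidsify_units` behaves as below; the expected outputs come straight from the new test file:

```python
from phys2bids.bids_units import bidsify_units

print(bidsify_units('centik'))  # 'cK'    known prefix + known unit
print(bidsify_units('matV'))    # 'matV'  unknown prefix, passed through with a warning
print(bidsify_units('mmlie'))   # 'mmlie' unknown unit, passed through with a warning
```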

version: 0.0
environment_setup_commit: d7ffcb0c36b683f721524a7a762d8c2f8cdb000c
FAIL_TO_PASS: [ "phys2bids/tests/test_bids_units.py::test_bidsify_units" ]
PASS_TO_PASS: []
meta: { "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
created_at: 2020-05-22 12:40:55+00:00
license: apache-2.0
__index_level_0__: 4538

instance_id: piccolo-orm__asyncio_tools-2
patch:
diff --git a/asyncio_tools.py b/asyncio_tools.py index 3c78360..a9a6711 100644 --- a/asyncio_tools.py +++ b/asyncio_tools.py @@ -45,30 +45,23 @@ class CompoundException(Exception): class GatheredResults: # __dict__ is required for cached_property - __slots__ = ("results", "__dict__") + __slots__ = ("__results", "__dict__") def __init__(self, results: t.List[t.Any]): - self.results = results + self.__results = results ########################################################################### - def __setattr__(self, key, value): - """ - Since we use cached_properties for most of the lookups, we don't want - the underlying results to be changed. There should be no reason for a - user to want to change the results, but just to be sure we raise a - ValueError. - """ - if key == "results": - raise ValueError("results is immutable") - super().__setattr__(key, value) + @property + def results(self): + return self.__results @property def all(self) -> t.List[t.Any]: """ Just a proxy. """ - return self.results + return self.__results ###########################################################################

repo: piccolo-orm/asyncio_tools
base_commit: 4e3c1f47cb4a42ba690217ba8369976546220ea9
test_patch:
diff --git a/tests/test_gathered_results.py b/tests/test_gathered_results.py
index 1b69db0..61dcbd4 100644
--- a/tests/test_gathered_results.py
+++ b/tests/test_gathered_results.py
@@ -36,5 +36,12 @@ class TestGatheredResults(TestCase):
     def test_set(self):
         results = GatheredResults([])
-        with self.assertRaises(ValueError):
+        with self.assertRaises(AttributeError):
             results.results = None
+
+    def test_set_2(self):
+        results = asyncio.run(gather(good(), bad()))
+        with self.assertRaises(AttributeError):
+            results.results = None
+        self.assertEqual(len(results.all), 2)
+        self.assertEqual(len(results.results), 2)

problem_statement:
gather() not working I just tried this library and got following results with exmaple in the README file: ``` $ python3.8 test.py Traceback (most recent call last): File "test.py", line 57, in <module> asyncio.run(main()) File "/usr/local/Cellar/[email protected]/3.8.13_2/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/runners.py", line 44, in run return loop.run_until_complete(main) File "/usr/local/Cellar/[email protected]/3.8.13_2/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete return future.result() File "test.py", line 13, in main response = await gather( File "/usr/local/lib/python3.8/site-packages/asyncio_tools.py", line 137, in gather return GatheredResults(results) File "/usr/local/lib/python3.8/site-packages/asyncio_tools.py", line 51, in __init__ self.results = results File "/usr/local/lib/python3.8/site-packages/asyncio_tools.py", line 63, in __setattr__ raise ValueError("results is immutable") ValueError: results is immutable ``` and similar results when running tests: ``` 19:54 $ python --version Python 3.10.6 (.venv) ✔ ~/src/asyncio_tools [master|…2025] 19:54 $ python -m pytest -s ======================================================================== test session starts ======================================================================== platform darwin -- Python 3.10.6, pytest-7.1.3, pluggy-1.0.0 rootdir: /Users/janne.kujanpaa/src/asyncio_tools collected 4 items tests/test_gathered_results.py FFFF ============================================================================= FAILURES ============================================================================== ____________________________________________________________ TestGatheredResults.test_compound_exception ____________________________________________________________ self = <test_gathered_results.TestGatheredResults testMethod=test_compound_exception> def test_compound_exception(self): > response: GatheredResults = asyncio.run( gather(good(), bad(), good(), bad()) ) tests/test_gathered_results.py:27: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /usr/local/Cellar/[email protected]/3.10.6_1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/runners.py:44: in run return loop.run_until_complete(main) /usr/local/Cellar/[email protected]/3.10.6_1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/base_events.py:646: in run_until_complete return future.result() asyncio_tools.py:137: in gather return GatheredResults(results) asyncio_tools.py:51: in __init__ self.results = results _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <asyncio_tools.GatheredResults object at 0x104049d80>, key = 'results', value = ['OK', ValueError('Bad value'), 'OK', ValueError('Bad value')] def __setattr__(self, key, value): """ Since we use cached_properties for most of the lookups, we don't want the underlying results to be changed. There should be no reason for a user to want to change the results, but just to be sure we raise a ValueError. 
""" if key == "results": > raise ValueError("results is immutable") E ValueError: results is immutable asyncio_tools.py:63: ValueError ________________________________________________________________ TestGatheredResults.test_exceptions ________________________________________________________________ self = <test_gathered_results.TestGatheredResults testMethod=test_exceptions> def test_exceptions(self): > response: GatheredResults = asyncio.run(gather(good(), bad(), good())) tests/test_gathered_results.py:17: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /usr/local/Cellar/[email protected]/3.10.6_1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/runners.py:44: in run return loop.run_until_complete(main) /usr/local/Cellar/[email protected]/3.10.6_1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/base_events.py:646: in run_until_complete return future.result() asyncio_tools.py:137: in gather return GatheredResults(results) asyncio_tools.py:51: in __init__ self.results = results _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <asyncio_tools.GatheredResults object at 0x10416f730>, key = 'results', value = ['OK', ValueError('Bad value'), 'OK'] def __setattr__(self, key, value): """ Since we use cached_properties for most of the lookups, we don't want the underlying results to be changed. There should be no reason for a user to want to change the results, but just to be sure we raise a ValueError. """ if key == "results": > raise ValueError("results is immutable") E ValueError: results is immutable asyncio_tools.py:63: ValueError ___________________________________________________________________ TestGatheredResults.test_set ____________________________________________________________________ self = <test_gathered_results.TestGatheredResults testMethod=test_set> def test_set(self): > results = GatheredResults([]) tests/test_gathered_results.py:38: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ asyncio_tools.py:51: in __init__ self.results = results _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <asyncio_tools.GatheredResults object at 0x10417c190>, key = 'results', value = [] def __setattr__(self, key, value): """ Since we use cached_properties for most of the lookups, we don't want the underlying results to be changed. There should be no reason for a user to want to change the results, but just to be sure we raise a ValueError. 
""" if key == "results": > raise ValueError("results is immutable") E ValueError: results is immutable asyncio_tools.py:63: ValueError ________________________________________________________________ TestGatheredResults.test_successes _________________________________________________________________ self = <test_gathered_results.TestGatheredResults testMethod=test_successes> def test_successes(self): > response: GatheredResults = asyncio.run(gather(good(), bad(), good())) tests/test_gathered_results.py:22: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ /usr/local/Cellar/[email protected]/3.10.6_1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/runners.py:44: in run return loop.run_until_complete(main) /usr/local/Cellar/[email protected]/3.10.6_1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/base_events.py:646: in run_until_complete return future.result() asyncio_tools.py:137: in gather return GatheredResults(results) asyncio_tools.py:51: in __init__ self.results = results _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <asyncio_tools.GatheredResults object at 0x103ad4280>, key = 'results', value = ['OK', ValueError('Bad value'), 'OK'] def __setattr__(self, key, value): """ Since we use cached_properties for most of the lookups, we don't want the underlying results to be changed. There should be no reason for a user to want to change the results, but just to be sure we raise a ValueError. """ if key == "results": > raise ValueError("results is immutable") E ValueError: results is immutable asyncio_tools.py:63: ValueError ====================================================================== short test summary info ====================================================================== FAILED tests/test_gathered_results.py::TestGatheredResults::test_compound_exception - ValueError: results is immutable FAILED tests/test_gathered_results.py::TestGatheredResults::test_exceptions - ValueError: results is immutable FAILED tests/test_gathered_results.py::TestGatheredResults::test_set - ValueError: results is immutable FAILED tests/test_gathered_results.py::TestGatheredResults::test_successes - ValueError: results is immutable ========================================================================= 4 failed in 0.34s ========================================================================= (.venv) ✔ ~/src/asyncio_tools [master|…2025] ```

version: 0.0
environment_setup_commit: 4e3c1f47cb4a42ba690217ba8369976546220ea9
FAIL_TO_PASS: [ "tests/test_gathered_results.py::TestGatheredResults::test_compound_exception", "tests/test_gathered_results.py::TestGatheredResults::test_exceptions", "tests/test_gathered_results.py::TestGatheredResults::test_set", "tests/test_gathered_results.py::TestGatheredResults::test_set_2", "tests/test_gathered_results.py::TestGatheredResults::test_successes" ]
PASS_TO_PASS: []
meta: { "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
created_at: 2022-10-09 14:18:07+00:00
license: mit
__index_level_0__: 4539

instance_id: pierreablin__picard-45
patch:
diff --git a/README.rst b/README.rst index 49fb927..d82d5b3 100644 --- a/README.rst +++ b/README.rst @@ -159,3 +159,9 @@ If you use this code in your project, please cite:: Faster ICA under orthogonal constraint ICASSP, 2018 https://arxiv.org/abs/1711.10873 + + +Changelog +--------- + +New in 0.8 : for the density `exp`, the default parameter is now alpha = 0.1 instead of alpha = 1. \ No newline at end of file diff --git a/doc/doc-requirements.txt b/doc/doc-requirements.txt index 540bae1..69e9182 100644 --- a/doc/doc-requirements.txt +++ b/doc/doc-requirements.txt @@ -3,4 +3,6 @@ numpydoc pillow scikit-learn >=0.18 sphinx-bootstrap-theme -sphinx-gallery \ No newline at end of file +sphinx-gallery +pooch +tqdm \ No newline at end of file diff --git a/picard/solver.py b/picard/solver.py index fc4bee9..5cdb06a 100644 --- a/picard/solver.py +++ b/picard/solver.py @@ -148,7 +148,7 @@ def picard(X, fun='tanh', n_components=None, ortho=True, extended=None, if fun == 'tanh': fun = Tanh() elif fun == 'exp': - fun = Exp() + fun = Exp(params={'alpha': 0.1}) elif fun == 'cube': fun = Cube() elif check_fun:

repo: pierreablin/picard
base_commit: 9a8bc6e7eb75a8bd53f0f267d02bdbeec75bc58a
test_patch:
diff --git a/.github/workflows/unittests.yml b/.github/workflows/unittests.yml index 34bbc6e..fff8351 100644 --- a/.github/workflows/unittests.yml +++ b/.github/workflows/unittests.yml @@ -13,7 +13,7 @@ jobs: strategy: fail-fast: false matrix: - python: [3.7, 3.8, 3.9] + python: [3.8, 3.9] steps: - uses: actions/checkout@v2 - uses: conda-incubator/setup-miniconda@v2 diff --git a/picard/tests/test_solver.py b/picard/tests/test_solver.py index 1a63ba6..885e538 100644 --- a/picard/tests/test_solver.py +++ b/picard/tests/test_solver.py @@ -187,7 +187,7 @@ def test_picardo(): if fun == 'tanh': fun = Tanh() elif fun == 'exp': - fun = Exp() + fun = Exp(params={'alpha': 0.1}) elif fun == 'cube': fun = Cube() # Get the final gradient norm

problem_statement: [Bug][version: 0.7] Running picard with infomax and an exponential density model leads to a FloatingPointError

Hello,

First of all, thank you for this great package. It helps me a lot in my project. Running picard on simple data sets, I encountered a FloatingPointError with infomax and the exponential density model.

Thank you for your help,
Nicolas Captier

**package version: 0.7**

## Description of the bug

When running the `picard` function with `ortho=False`, `extended=False` and `fun='exp'`, a FloatingPointError appears. I tested different data sets and systematically encountered this error.

## Minimal code to reproduce the bug

```
import numpy as np
from picard import picard

N, T = 3, 1000
S = np.random.laplace(size=(N, T))
A = np.random.randn(N, N)
X = np.dot(A, S)

K, W, Y = picard(X, ortho=False, extended=False, fun='exp')
```

## Screen shots of the error messages

![image](https://user-images.githubusercontent.com/60263988/145220109-e8a239ed-692c-4291-8437-b6607e1188a8.png)
![image](https://user-images.githubusercontent.com/60263988/145220314-830e8d12-c2ad-4058-900e-98f9baba106d.png)
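The merged fix keeps `fun='exp'` working by constructing the density as `Exp(params={'alpha': 0.1})` inside the solver. An explicit equivalent call, sketched with an assumed import path for `Exp` (check where your picard version exposes it):

```python
import numpy as np
from picard import picard
from picard.densities import Exp  # assumption: density classes live here

rng = np.random.RandomState(0)
S = rng.laplace(size=(3, 1000))
A = rng.randn(3, 3)
X = A @ S

# alpha=0.1 is the new default the patch applies when fun='exp'.
K, W, Y = picard(X, ortho=False, extended=False,
                 fun=Exp(params={'alpha': 0.1}))
```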

version: 0.0
environment_setup_commit: 9a8bc6e7eb75a8bd53f0f267d02bdbeec75bc58a
FAIL_TO_PASS: [ "picard/tests/test_solver.py::test_picardo" ]
PASS_TO_PASS: [ "picard/tests/test_solver.py::test_dimension_reduction", "picard/tests/test_solver.py::test_dots", "picard/tests/test_solver.py::test_pre_fastica", "picard/tests/test_solver.py::test_picard", "picard/tests/test_solver.py::test_extended", "picard/tests/test_solver.py::test_shift", "picard/tests/test_solver.py::test_bad_custom_density", "picard/tests/test_solver.py::test_fun", "picard/tests/test_solver.py::test_no_regression", "picard/tests/test_solver.py::test_amari_distance" ]
meta: { "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
created_at: 2021-12-08 14:21:16+00:00
license: bsd-3-clause
__index_level_0__: 4540

instance_id: pika__pika-1041
patch:
diff --git a/README.rst b/README.rst index a7a52be..1fcb7c7 100644 --- a/README.rst +++ b/README.rst @@ -12,9 +12,9 @@ extensions. - Python 2.7 and 3.4+ are supported. - Since threads aren't appropriate to every situation, it doesn't - require threads. It takes care not to forbid them, either. The same - goes for greenlets, callbacks, continuations and generators. It is - not necessarily thread-safe however, and your mileage will vary. + require threads. Pika core takes care not to forbid them, either. The same + goes for greenlets, callbacks, continuations, and generators. An instance of + Pika's built-in connection adapters is not thread-safe, however. - People may be using direct sockets, plain old `select()`, or any of the wide variety of ways of getting network events to and from a diff --git a/pika/heartbeat.py b/pika/heartbeat.py index af6d93c..a3822a4 100644 --- a/pika/heartbeat.py +++ b/pika/heartbeat.py @@ -50,7 +50,7 @@ class HeartbeatChecker(object): :rtype True """ - return self._connection.heartbeat is self + return self._connection._heartbeat_checker is self @property def bytes_received_on_connection(self):

repo: pika/pika
base_commit: d9a1baa40b162cc4c638f95a8e4e9ab666af4288
test_patch:
diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py index c9e8b63..2ff98a7 100644 --- a/tests/unit/heartbeat_tests.py +++ b/tests/unit/heartbeat_tests.py @@ -20,15 +20,39 @@ import pika.exceptions # pylint: disable=C0103 +class ConstructableConnection(connection.Connection): + """Adds dummy overrides for `Connection`'s abstract methods so + that we can instantiate and test it. + + """ + def _adapter_connect_stream(self): + pass + + def _adapter_disconnect_stream(self): + raise NotImplementedError + + def add_timeout(self, deadline, callback): + raise NotImplementedError + + def remove_timeout(self, timeout_id): + raise NotImplementedError + + def _adapter_emit_data(self, data): + raise NotImplementedError + + def _adapter_get_write_buffer_size(self): + raise NotImplementedError + + class HeartbeatTests(unittest.TestCase): INTERVAL = 5 def setUp(self): - self.mock_conn = mock.Mock(spec=connection.Connection) + self.mock_conn = mock.Mock(spec_set=ConstructableConnection()) self.mock_conn.bytes_received = 100 self.mock_conn.bytes_sent = 100 - self.mock_conn.heartbeat = mock.Mock(spec=heartbeat.HeartbeatChecker) + self.mock_conn._heartbeat_checker = mock.Mock(spec=heartbeat.HeartbeatChecker) self.obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) def tearDown(self): @@ -65,11 +89,11 @@ class HeartbeatTests(unittest.TestCase): timer.assert_called_once_with() def test_active_true(self): - self.mock_conn.heartbeat = self.obj + self.mock_conn._heartbeat_checker = self.obj self.assertTrue(self.obj.active) def test_active_false(self): - self.mock_conn.heartbeat = mock.Mock() + self.mock_conn._heartbeat_checker = mock.Mock() self.assertFalse(self.obj.active) def test_bytes_received_on_connection(self): @@ -178,7 +202,7 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer') def test_start_timer_active(self, setup_timer): - self.mock_conn.heartbeat = self.obj + self.mock_conn._heartbeat_checker = self.obj self.obj._start_timer() self.assertTrue(setup_timer.called)

problem_statement: BlockingConnection heartbeat attribute can be undefined

I've been testing connection recovery docs and managed to run into the following exceptions several times:

```
Traceback (most recent call last):
  File "./blocking_consumer_recovery2.py", line 31, in <module>
    channel.start_consuming()
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/adapters/blocking_connection.py", line 1878, in start_consuming
    self._process_data_events(time_limit=None)
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/adapters/blocking_connection.py", line 2040, in _process_data_events
    self.connection.process_data_events(time_limit=time_limit)
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/adapters/blocking_connection.py", line 814, in process_data_events
    self._flush_output(common_terminator)
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/adapters/blocking_connection.py", line 525, in _flush_output
    self._impl.ioloop.process_timeouts()
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/adapters/select_connection.py", line 462, in process_timeouts
    self._timer.process_timeouts()
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/adapters/select_connection.py", line 297, in process_timeouts
    timeout.callback()
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/heartbeat.py", line 104, in send_and_check
    self._start_timer()
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/heartbeat.py", line 167, in _start_timer
    if self.active:
  File "/Users/antares/Development/RabbitMQ/pika.git/pika/heartbeat.py", line 53, in active
    return self._connection.heartbeat is self
AttributeError: 'SelectConnection' object has no attribute 'heartbeat'
```

unfortunately I don't have a 100% clear idea of how to reproduce this but all I did was restarting down nodes cleanly. At some point during recovery attempt the above exception would make the process exit. Perhaps the code that checks for `self._connection.heartbeat` should be more defensive.
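The merged fix points the `active` property at the attribute the connection actually sets (`_heartbeat_checker`); sketched below with an extra `getattr` default as the kind of defensive check the report asks for (the guard is hypothetical, not part of the patch):

```python
class HeartbeatChecker:
    def __init__(self, connection, interval):
        self._connection = connection
        self._interval = interval

    @property
    def active(self):
        """True if this checker is the one its connection is using."""
        return getattr(self._connection, '_heartbeat_checker', None) is self
```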

version: 0.0
environment_setup_commit: d9a1baa40b162cc4c638f95a8e4e9ab666af4288
FAIL_TO_PASS: [ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active" ]
PASS_TO_PASS:
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-05-09 06:29:34+00:00
bsd-3-clause
4,541

instance_id: pika__pika-1057
patch:
diff --git a/pika/heartbeat.py b/pika/heartbeat.py index 4eaaa59..3c9f46f 100644 --- a/pika/heartbeat.py +++ b/pika/heartbeat.py @@ -12,21 +12,24 @@ class HeartbeatChecker(object): intervals. """ + DEFAULT_INTERVAL = 60 MAX_IDLE_COUNT = 2 + _STALE_CONNECTION = "Too Many Missed Heartbeats, No reply in %i seconds" - def __init__(self, connection, interval, idle_count=MAX_IDLE_COUNT): + def __init__(self, connection, interval=DEFAULT_INTERVAL, idle_count=MAX_IDLE_COUNT): """Create a heartbeat on connection sending a heartbeat frame every interval seconds. :param pika.connection.Connection: Connection object - :param int interval: Heartbeat check interval - :param int idle_count: Number of heartbeat intervals missed until the - connection is considered idle and disconnects + :param int interval: Heartbeat check interval. Note: heartbeats will + be sent at interval / 2 frequency. """ self._connection = connection - self._interval = interval + # Note: see the following document: + # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout + self._interval = float(interval / 2) self._max_idle_count = idle_count # Initialize counters
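Going only by the diff above, heartbeat frames are now sent at half the negotiated timeout, following https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout; a tiny sketch of the resulting timing:

```python
DEFAULT_INTERVAL = 60  # seconds, the new class default
MAX_IDLE_COUNT = 2     # missed intervals tolerated before the connection is considered dead

def send_interval(interval=DEFAULT_INTERVAL):
    """Seconds between outgoing heartbeat frames."""
    return float(interval / 2)

print(send_interval())   # 30.0
print(send_interval(5))  # 2.5
```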
pika/pika
c0ed61e151c02dc3a84a7c7e4ca5f72ab9d87d9f
diff --git a/tests/unit/connection_tests.py b/tests/unit/connection_tests.py index 9f40320..438633c 100644 --- a/tests/unit/connection_tests.py +++ b/tests/unit/connection_tests.py @@ -536,7 +536,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904 _adapter_emit_data, method, heartbeat_checker): - """make sure on connection tune turns the connection params""" + """make sure _on_connection_tune tunes the connection params""" heartbeat_checker.return_value = 'hearbeat obj' self.connection._flush_outbound = mock.Mock() marshal = mock.Mock(return_value='ab') diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py index c5a7ca5..eaf339f 100644 --- a/tests/unit/heartbeat_tests.py +++ b/tests/unit/heartbeat_tests.py @@ -55,27 +55,31 @@ class ConstructableConnection(connection.Connection): class HeartbeatTests(unittest.TestCase): - INTERVAL = 5 + INTERVAL = 60 + HALF_INTERVAL = INTERVAL / 2 def setUp(self): self.mock_conn = mock.Mock(spec_set=ConstructableConnection()) self.mock_conn.bytes_received = 100 self.mock_conn.bytes_sent = 100 self.mock_conn._heartbeat_checker = mock.Mock(spec=heartbeat.HeartbeatChecker) - self.obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + self.obj = heartbeat.HeartbeatChecker(self.mock_conn) def tearDown(self): del self.obj del self.mock_conn + def test_default_initialization_interval(self): + self.assertEqual(self.obj._interval, self.HALF_INTERVAL) + def test_default_initialization_max_idle_count(self): self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT) def test_constructor_assignment_connection(self): - self.assertEqual(self.obj._connection, self.mock_conn) + self.assertIs(self.obj._connection, self.mock_conn) def test_constructor_assignment_heartbeat_interval(self): - self.assertEqual(self.obj._interval, self.INTERVAL) + self.assertEqual(self.obj._interval, self.HALF_INTERVAL) def test_constructor_initial_bytes_received(self): self.assertEqual(self.obj._bytes_received, 0) @@ -94,7 +98,7 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer') def test_constructor_called_setup_timer(self, timer): - heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + heartbeat.HeartbeatChecker(self.mock_conn) timer.assert_called_once_with() def test_active_true(self): @@ -122,13 +126,13 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection') def test_send_and_check_not_closed(self, close_connection): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() close_connection.assert_not_called() @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection') def test_send_and_check_missed_bytes(self, close_connection): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj._idle_byte_intervals = self.INTERVAL obj.send_and_check() close_connection.assert_called_once_with() @@ -147,19 +151,19 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._update_counters') def test_send_and_check_update_counters(self, update_counters): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() update_counters.assert_called_once_with() @mock.patch('pika.heartbeat.HeartbeatChecker._send_heartbeat_frame') def test_send_and_check_send_heartbeat_frame(self, 
send_heartbeat_frame): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() send_heartbeat_frame.assert_called_once_with() @mock.patch('pika.heartbeat.HeartbeatChecker._start_timer') def test_send_and_check_start_timer(self, start_timer): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() start_timer.assert_called_once_with() @@ -202,7 +206,7 @@ class HeartbeatTests(unittest.TestCase): def test_setup_timer_called(self): self.mock_conn._adapter_add_timeout.assert_called_once_with( - self.INTERVAL, self.obj.send_and_check) + self.HALF_INTERVAL, self.obj.send_and_check) @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer') def test_start_timer_not_active(self, setup_timer):
HeartbeatChecker is confused about heartbeat timeouts. cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate. The `HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. The `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and the default `idle_count`. So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. As a result, `HeartbeatChecker` will emit heartbeats to the broker only once every 600 seconds, and it will detect a heartbeat timeout only after 1200 seconds. So, in the event that receipt of the heartbeat by the broker is slightly delayed (and in the absence of any other AMQP frames from the client), the broker can erroneously conclude that the connection with the client is lost and prematurely close the connection. This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity, and it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window. I see two problems here: 1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does the RabbitMQ broker send heartbeats within one heartbeat timeout interval?). 2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. Its constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
0.0
c0ed61e151c02dc3a84a7c7e4ca5f72ab9d87d9f
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals" ]
[ "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_start", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_auth_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_next_channel_number_returns_lowest_unused", "tests/unit/connection_tests.py::ConnectionTests::test_set_backpressure_multiplier", "tests/unit/connection_tests.py::ConnectionTests::test_client_properties_default", "tests/unit/connection_tests.py::ConnectionTests::test_no_side_effects_from_message_marshal_error", "tests/unit/connection_tests.py::ConnectionTests::test_close_closes_opening_channels", "tests/unit/connection_tests.py::ConnectionTests::test_close_does_not_close_closing_channels", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_last_channel_calls_on_close_ready", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_protocol_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closed_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_with_closing_channels", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_blocked_in_a_row_sets_timer_once", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closing_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_close_callback", "tests/unit/connection_tests.py::ConnectionTests::test_create_with_blocked_connection_timeout_config", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_start_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_connection_closed_callback", "tests/unit/connection_tests.py::ConnectionTests::test_close_closes_open_channels", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_ok", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_init_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_connect_no_adapter_connect_from_constructor_with_external_workflow", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_blocked_callback", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_timeout_terminates_connection", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_tune", "tests/unit/connection_tests.py::ConnectionTests::test_send_message_updates_frames_sent_and_bytes_sent", "tests/unit/connection_tests.py::ConnectionTests::test_close_raises_wrong_state_when_already_closed_or_closing", "tests/unit/connection_tests.py::ConnectionTests::test_close_channels", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_unblocked_callback", "tests/unit/connection_tests.py::ConnectionTests::test_channel", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_more_channels_no_on_close_ready", "tests/unit/connection_tests.py::ConnectionTests::test_connection_blocked_sets_timer", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_non_closing_state", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_open_error_callback", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_tune_connection_raises_connection_closed", 
"tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_access_denied_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_from_broker_passes_correct_exception", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_protocol_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_close_calls_on_close_ready_when_no_channels", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_on_stream_terminated_removes_timer", "tests/unit/connection_tests.py::ConnectionTests::test_deliver_frame_to_channel_with_frame_for_unknown_channel", "tests/unit/connection_tests.py::ConnectionTests::test_add_callbacks", "tests/unit/connection_tests.py::ConnectionTests::test_new_conn_should_use_first_channel", "tests/unit/connection_tests.py::ConnectionTests::test_on_data_available", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_unblocked_in_a_row_removes_timer_once", "tests/unit/connection_tests.py::ConnectionTests::test_client_properties_override", "tests/unit/connection_tests.py::ConnectionTests::test_client_properties", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_cleans_up", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_unblocked_removes_timer", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_connected" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2018-05-31 21:02:39+00:00
bsd-3-clause
4,542
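The pika-1057 patch above derives the heartbeat send interval by halving the negotiated timeout, so a frame reaches the broker well inside its timeout window even if one send is delayed. A minimal sketch of that arithmetic; the function name and demo value below are invented for illustration and are not part of pika's API:

def heartbeat_send_interval(timeout):
    """Return how often a client should emit heartbeat frames.

    Sending at timeout / 2 means that even if one frame is delayed,
    a second one still arrives before the broker's timeout expires.
    """
    if timeout <= 0:
        raise ValueError('heartbeat timeout must be positive')
    return float(timeout) / 2

# With the 600-second timeout from the problem statement, heartbeats
# now go out every 300 seconds instead of once per 600-second window.
assert heartbeat_send_interval(600) == 300.0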
pika__pika-1064
diff --git a/examples/consume.py b/examples/consume.py index 17667de..2254cd1 100644 --- a/examples/consume.py +++ b/examples/consume.py @@ -1,12 +1,19 @@ import functools +import logging import pika +LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') +LOGGER = logging.getLogger(__name__) + +logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) + def on_message(channel, method_frame, header_frame, body, userdata=None): - print('Userdata: {} Message body: {}'.format(userdata, body)) + LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body)) channel.basic_ack(delivery_tag=method_frame.delivery_tag) credentials = pika.PlainCredentials('guest', 'guest') -parameters = pika.ConnectionParameters('localhost', credentials=credentials) +parameters = pika.ConnectionParameters('localhost', credentials=credentials) connection = pika.BlockingConnection(parameters) channel = connection.channel() diff --git a/pika/adapters/utils/connection_workflow.py b/pika/adapters/utils/connection_workflow.py index e6926c9..e3567b5 100644 --- a/pika/adapters/utils/connection_workflow.py +++ b/pika/adapters/utils/connection_workflow.py @@ -14,6 +14,7 @@ import socket import pika.compat import pika.exceptions import pika.tcp_socket_opts +from pika import __version__ _LOG = logging.getLogger(__name__) @@ -186,7 +187,8 @@ class AMQPConnector(object): self._sock.setblocking(False) addr = self._addr_record[4] - _LOG.info('Connecting to AMQP broker at %r', addr) + _LOG.info('Pika version %s connecting to %r', + __version__, addr) self._task_ref = self._nbio.connect_socket( self._sock, addr, diff --git a/pika/heartbeat.py b/pika/heartbeat.py index 4eaaa59..3c9f46f 100644 --- a/pika/heartbeat.py +++ b/pika/heartbeat.py @@ -12,21 +12,24 @@ class HeartbeatChecker(object): intervals. """ + DEFAULT_INTERVAL = 60 MAX_IDLE_COUNT = 2 + _STALE_CONNECTION = "Too Many Missed Heartbeats, No reply in %i seconds" - def __init__(self, connection, interval, idle_count=MAX_IDLE_COUNT): + def __init__(self, connection, interval=DEFAULT_INTERVAL, idle_count=MAX_IDLE_COUNT): """Create a heartbeat on connection sending a heartbeat frame every interval seconds. :param pika.connection.Connection: Connection object - :param int interval: Heartbeat check interval - :param int idle_count: Number of heartbeat intervals missed until the - connection is considered idle and disconnects + :param int interval: Heartbeat check interval. Note: heartbeats will + be sent at interval / 2 frequency. """ self._connection = connection - self._interval = interval + # Note: see the following document: + # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout + self._interval = float(interval / 2) self._max_idle_count = idle_count # Initialize counters
pika/pika
c0ed61e151c02dc3a84a7c7e4ca5f72ab9d87d9f
diff --git a/tests/unit/connection_tests.py b/tests/unit/connection_tests.py index 9f40320..438633c 100644 --- a/tests/unit/connection_tests.py +++ b/tests/unit/connection_tests.py @@ -536,7 +536,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904 _adapter_emit_data, method, heartbeat_checker): - """make sure on connection tune turns the connection params""" + """make sure _on_connection_tune tunes the connection params""" heartbeat_checker.return_value = 'hearbeat obj' self.connection._flush_outbound = mock.Mock() marshal = mock.Mock(return_value='ab') diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py index c5a7ca5..eaf339f 100644 --- a/tests/unit/heartbeat_tests.py +++ b/tests/unit/heartbeat_tests.py @@ -55,27 +55,31 @@ class ConstructableConnection(connection.Connection): class HeartbeatTests(unittest.TestCase): - INTERVAL = 5 + INTERVAL = 60 + HALF_INTERVAL = INTERVAL / 2 def setUp(self): self.mock_conn = mock.Mock(spec_set=ConstructableConnection()) self.mock_conn.bytes_received = 100 self.mock_conn.bytes_sent = 100 self.mock_conn._heartbeat_checker = mock.Mock(spec=heartbeat.HeartbeatChecker) - self.obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + self.obj = heartbeat.HeartbeatChecker(self.mock_conn) def tearDown(self): del self.obj del self.mock_conn + def test_default_initialization_interval(self): + self.assertEqual(self.obj._interval, self.HALF_INTERVAL) + def test_default_initialization_max_idle_count(self): self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT) def test_constructor_assignment_connection(self): - self.assertEqual(self.obj._connection, self.mock_conn) + self.assertIs(self.obj._connection, self.mock_conn) def test_constructor_assignment_heartbeat_interval(self): - self.assertEqual(self.obj._interval, self.INTERVAL) + self.assertEqual(self.obj._interval, self.HALF_INTERVAL) def test_constructor_initial_bytes_received(self): self.assertEqual(self.obj._bytes_received, 0) @@ -94,7 +98,7 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer') def test_constructor_called_setup_timer(self, timer): - heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + heartbeat.HeartbeatChecker(self.mock_conn) timer.assert_called_once_with() def test_active_true(self): @@ -122,13 +126,13 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection') def test_send_and_check_not_closed(self, close_connection): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() close_connection.assert_not_called() @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection') def test_send_and_check_missed_bytes(self, close_connection): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj._idle_byte_intervals = self.INTERVAL obj.send_and_check() close_connection.assert_called_once_with() @@ -147,19 +151,19 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._update_counters') def test_send_and_check_update_counters(self, update_counters): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() update_counters.assert_called_once_with() @mock.patch('pika.heartbeat.HeartbeatChecker._send_heartbeat_frame') def test_send_and_check_send_heartbeat_frame(self, 
send_heartbeat_frame): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() send_heartbeat_frame.assert_called_once_with() @mock.patch('pika.heartbeat.HeartbeatChecker._start_timer') def test_send_and_check_start_timer(self, start_timer): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + obj = heartbeat.HeartbeatChecker(self.mock_conn) obj.send_and_check() start_timer.assert_called_once_with() @@ -202,7 +206,7 @@ class HeartbeatTests(unittest.TestCase): def test_setup_timer_called(self): self.mock_conn._adapter_add_timeout.assert_called_once_with( - self.INTERVAL, self.obj.send_and_check) + self.HALF_INTERVAL, self.obj.send_and_check) @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer') def test_start_timer_not_active(self, setup_timer):
Include the Pika version number in the log when opening a connection. See @vitaly-krugl's comment [here](https://github.com/pika/pika/issues/1046#issuecomment-392585166).
0.0
c0ed61e151c02dc3a84a7c7e4ca5f72ab9d87d9f
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false" ]
[ "tests/unit/connection_tests.py::ConnectionTests::test_client_properties_override", "tests/unit/connection_tests.py::ConnectionTests::test_client_properties", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_connection_closed_callback", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_init_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_close_calls_on_close_ready_when_no_channels", "tests/unit/connection_tests.py::ConnectionTests::test_close_closes_open_channels", "tests/unit/connection_tests.py::ConnectionTests::test_connect_no_adapter_connect_from_constructor_with_external_workflow", "tests/unit/connection_tests.py::ConnectionTests::test_on_data_available", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_tune", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_blocked_in_a_row_sets_timer_once", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_ok", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closed_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_protocol_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_new_conn_should_use_first_channel", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_tune_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_open_error_callback", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_on_stream_terminated_removes_timer", "tests/unit/connection_tests.py::ConnectionTests::test_set_backpressure_multiplier", "tests/unit/connection_tests.py::ConnectionTests::test_add_callbacks", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_access_denied_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_non_closing_state", "tests/unit/connection_tests.py::ConnectionTests::test_send_message_updates_frames_sent_and_bytes_sent", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_blocked_callback", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_with_closing_channels", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_from_broker_passes_correct_exception", "tests/unit/connection_tests.py::ConnectionTests::test_channel", "tests/unit/connection_tests.py::ConnectionTests::test_close_raises_wrong_state_when_already_closed_or_closing", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_more_channels_no_on_close_ready", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_timeout_terminates_connection", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_cleans_up", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_auth_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_close_channels", "tests/unit/connection_tests.py::ConnectionTests::test_close_closes_opening_channels", "tests/unit/connection_tests.py::ConnectionTests::test_client_properties_default", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_start", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_last_channel_calls_on_close_ready", 
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_unblocked_removes_timer", "tests/unit/connection_tests.py::ConnectionTests::test_no_side_effects_from_message_marshal_error", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_close_callback", "tests/unit/connection_tests.py::ConnectionTests::test_connection_blocked_sets_timer", "tests/unit/connection_tests.py::ConnectionTests::test_next_channel_number_returns_lowest_unused", "tests/unit/connection_tests.py::ConnectionTests::test_deliver_frame_to_channel_with_frame_for_unknown_channel", "tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_unblocked_in_a_row_removes_timer_once", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_protocol_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_create_with_blocked_connection_timeout_config", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closing_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_stream_connected", "tests/unit/connection_tests.py::ConnectionTests::test_channel_on_start_connection_raises_connection_closed", "tests/unit/connection_tests.py::ConnectionTests::test_close_does_not_close_closing_channels", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_unblocked_callback" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-06-06 17:35:44+00:00
bsd-3-clause
4,543
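The pika-1064 patch makes the connection log line self-identifying by including the library version, which helps when correlating client and broker logs. A stand-alone sketch of the same idea; the hard-coded __version__ and the connect_socket name are assumptions for this example, whereas pika imports its real version from the pika package:

import logging

__version__ = '1.0.0.dev0'  # stand-in for the real package version

LOGGER = logging.getLogger(__name__)

def connect_socket(addr):
    """Log the client version alongside the target address before connecting."""
    LOGGER.info('Client version %s connecting to %r', __version__, addr)
    # ... actual socket setup would follow here ...

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    connect_socket(('localhost', 5672))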
pika__pika-1066
diff --git a/examples/consume.py b/examples/consume.py index da95d9e..7344149 100644 --- a/examples/consume.py +++ b/examples/consume.py @@ -1,17 +1,15 @@ +import functools +import logging import pika -def on_message(channel, method_frame, header_frame, body): - channel.queue_declare(queue=body, auto_delete=True) +LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' + '-35s %(lineno) -5d: %(message)s') +LOGGER = logging.getLogger(__name__) - if body.startswith("queue:"): - queue = body.replace("queue:", "") - key = body + "_key" - print("Declaring queue %s bound with key %s" %(queue, key)) - channel.queue_declare(queue=queue, auto_delete=True) - channel.queue_bind(queue=queue, exchange="test_exchange", routing_key=key) - else: - print("Message body", body) +logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) +def on_message(channel, method_frame, header_frame, body, userdata=None): + LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body)) channel.basic_ack(delivery_tag=method_frame.delivery_tag) credentials = pika.PlainCredentials('guest', 'guest') @@ -24,7 +22,8 @@ channel.queue_declare(queue="standard", auto_delete=True) channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key") channel.basic_qos(prefetch_count=1) -channel.basic_consume(on_message, 'standard') +on_message_callback = functools.partial(on_message, userdata='on_message_userdata') +channel.basic_consume(on_message_callback, 'standard') try: channel.start_consuming() diff --git a/pika/heartbeat.py b/pika/heartbeat.py index c02d5df..8d3d20a 100644 --- a/pika/heartbeat.py +++ b/pika/heartbeat.py @@ -23,13 +23,22 @@ class HeartbeatChecker(object): :param pika.connection.Connection: Connection object :param int interval: Heartbeat check interval. Note: heartbeats will be sent at interval / 2 frequency. + :param int idle_count: The number of heartbeat intervals without data + received that will close the current connection. """ self._connection = connection + # Note: see the following document: # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout self._interval = float(interval / 2) - self._max_idle_count = idle_count + + # Note: even though we're sending heartbeats in half the specified + # interval, the broker will be sending them to us at the specified + # interval. This means we'll be checking for an idle connection + # twice as many times as the broker will send heartbeats to us, + # so we need to double the max idle count here + self._max_idle_count = idle_count * 2 # Initialize counters self._bytes_received = 0 @@ -82,9 +91,12 @@ class HeartbeatChecker(object): been idle too long. """ - LOGGER.debug('Received %i heartbeat frames, sent %i', + LOGGER.debug('Received %i heartbeat frames, sent %i, ' + 'idle intervals %i, max idle count %i', self._heartbeat_frames_received, - self._heartbeat_frames_sent) + self._heartbeat_frames_sent, + self._idle_byte_intervals, + self._max_idle_count) if self.connection_is_idle: return self._close_connection()
pika/pika
17aed0fa20f55ed3bc080320414badbb27046e8d
diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py index fa97338..f0431c2 100644 --- a/tests/unit/heartbeat_tests.py +++ b/tests/unit/heartbeat_tests.py @@ -29,7 +29,7 @@ class HeartbeatTests(unittest.TestCase): self.assertEqual(self.obj._interval, self.HALF_INTERVAL) def test_default_initialization_max_idle_count(self): - self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT) + self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2) def test_constructor_assignment_connection(self): self.assertIs(self.obj._connection, self.mock_conn)
HeartbeatChecker is confused about heartbeat timeouts. cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate. The `HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. The `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and the default `idle_count`. So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. As a result, `HeartbeatChecker` will emit heartbeats to the broker only once every 600 seconds, and it will detect a heartbeat timeout only after 1200 seconds. So, in the event that receipt of the heartbeat by the broker is slightly delayed (and in the absence of any other AMQP frames from the client), the broker can erroneously conclude that the connection with the client is lost and prematurely close the connection. This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity, and it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window. I see two problems here: 1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does the RabbitMQ broker send heartbeats within one heartbeat timeout interval?). 2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. Its constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
0.0
17aed0fa20f55ed3bc080320414badbb27046e8d
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count" ]
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-06-06 22:49:26+00:00
bsd-3-clause
4,544
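The pika-1066 patch doubles the maximum idle count because the client now checks for idleness twice per negotiated timeout, while the broker still emits one heartbeat per full timeout; doubling keeps the effective stale window unchanged. The arithmetic, with names invented for this sketch:

def stale_after_seconds(timeout, idle_count=2):
    """Seconds of total silence before the connection is declared stale."""
    check_interval = float(timeout) / 2      # client checks twice per timeout
    max_idle_checks = idle_count * 2         # doubled, as in the patch
    return check_interval * max_idle_checks  # equals idle_count * timeout

# A 60-second negotiated timeout still tolerates 2 * 60 = 120 seconds
# of silence, the same effective window as before the interval change.
assert stale_after_seconds(60) == 120.0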
pika__pika-1067
diff --git a/examples/consume.py b/examples/consume.py index 2254cd1..e4f86de 100644 --- a/examples/consume.py +++ b/examples/consume.py @@ -6,7 +6,7 @@ LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' '-35s %(lineno) -5d: %(message)s') LOGGER = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) +logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) def on_message(channel, method_frame, header_frame, body, userdata=None): LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body)) diff --git a/pika/heartbeat.py b/pika/heartbeat.py index 3c9f46f..73a6df8 100644 --- a/pika/heartbeat.py +++ b/pika/heartbeat.py @@ -24,13 +24,22 @@ class HeartbeatChecker(object): :param pika.connection.Connection: Connection object :param int interval: Heartbeat check interval. Note: heartbeats will be sent at interval / 2 frequency. + :param int idle_count: The number of heartbeat intervals without data + received that will close the current connection. """ self._connection = connection + # Note: see the following document: # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout self._interval = float(interval / 2) - self._max_idle_count = idle_count + + # Note: even though we're sending heartbeats in half the specified + # interval, the broker will be sending them to us at the specified + # interval. This means we'll be checking for an idle connection + # twice as many times as the broker will send heartbeats to us, + # so we need to double the max idle count here + self._max_idle_count = idle_count * 2 # Initialize counters self._bytes_received = 0 @@ -83,9 +92,12 @@ class HeartbeatChecker(object): been idle too long. """ - LOGGER.debug('Received %i heartbeat frames, sent %i', + LOGGER.debug('Received %i heartbeat frames, sent %i, ' + 'idle intervals %i, max idle count %i', self._heartbeat_frames_received, - self._heartbeat_frames_sent) + self._heartbeat_frames_sent, + self._idle_byte_intervals, + self._max_idle_count) if self.connection_is_idle: self._close_connection()
pika/pika
e1f0ef51de293395392bd3d28dba4262a61fbc98
diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py index eaf339f..1cb1160 100644 --- a/tests/unit/heartbeat_tests.py +++ b/tests/unit/heartbeat_tests.py @@ -73,7 +73,7 @@ class HeartbeatTests(unittest.TestCase): self.assertEqual(self.obj._interval, self.HALF_INTERVAL) def test_default_initialization_max_idle_count(self): - self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT) + self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2) def test_constructor_assignment_connection(self): self.assertIs(self.obj._connection, self.mock_conn)
HeartbeatChecker is confused about heartbeat timeouts. cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate. The `HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. The `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and the default `idle_count`. So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. As a result, `HeartbeatChecker` will emit heartbeats to the broker only once every 600 seconds, and it will detect a heartbeat timeout only after 1200 seconds. So, in the event that receipt of the heartbeat by the broker is slightly delayed (and in the absence of any other AMQP frames from the client), the broker can erroneously conclude that the connection with the client is lost and prematurely close the connection. This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity, and it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window. I see two problems here: 1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does the RabbitMQ broker send heartbeats within one heartbeat timeout interval?). 2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. Its constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
0.0
e1f0ef51de293395392bd3d28dba4262a61fbc98
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count" ]
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2018-06-06 23:14:54+00:00
bsd-3-clause
4,545
pika__pika-1071
diff --git a/examples/consume.py b/examples/consume.py index 7344149..26e4620 100644 --- a/examples/consume.py +++ b/examples/consume.py @@ -1,3 +1,4 @@ +"""Basic message consumer example""" import functools import logging import pika @@ -8,26 +9,36 @@ LOGGER = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) -def on_message(channel, method_frame, header_frame, body, userdata=None): - LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body)) - channel.basic_ack(delivery_tag=method_frame.delivery_tag) - -credentials = pika.PlainCredentials('guest', 'guest') -parameters = pika.ConnectionParameters('localhost', credentials=credentials) -connection = pika.BlockingConnection(parameters) - -channel = connection.channel() -channel.exchange_declare(exchange="test_exchange", exchange_type="direct", passive=False, durable=True, auto_delete=False) -channel.queue_declare(queue="standard", auto_delete=True) -channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key") -channel.basic_qos(prefetch_count=1) - -on_message_callback = functools.partial(on_message, userdata='on_message_userdata') -channel.basic_consume(on_message_callback, 'standard') - -try: - channel.start_consuming() -except KeyboardInterrupt: - channel.stop_consuming() - -connection.close() +def on_message(chan, method_frame, _header_frame, body, userdata=None): + """Called when a message is received. Log message and ack it.""" + LOGGER.info('Userdata: %s Message body: %s', userdata, body) + chan.basic_ack(delivery_tag=method_frame.delivery_tag) + +def main(): + """Main method.""" + credentials = pika.PlainCredentials('guest', 'guest') + parameters = pika.ConnectionParameters('localhost', credentials=credentials) + connection = pika.BlockingConnection(parameters) + + channel = connection.channel() + channel.exchange_declare(exchange="test_exchange", + exchange_type="direct", + passive=False, + durable=True, + auto_delete=False) + channel.queue_declare(queue="standard", auto_delete=True) + channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key") + channel.basic_qos(prefetch_count=1) + + on_message_callback = functools.partial(on_message, userdata='on_message_userdata') + channel.basic_consume(on_message_callback, 'standard') + + try: + channel.start_consuming() + except KeyboardInterrupt: + channel.stop_consuming() + + connection.close() + +if __name__ == '__main__': + main() diff --git a/pika/connection.py b/pika/connection.py index 0c4e2a7..bed9bdb 100644 --- a/pika/connection.py +++ b/pika/connection.py @@ -1301,7 +1301,7 @@ class Connection(object): self._backpressure_multiplier = value # - # Connections state properties + # Connection state properties # @property diff --git a/pika/heartbeat.py b/pika/heartbeat.py index 8d3d20a..7d4d7dd 100644 --- a/pika/heartbeat.py +++ b/pika/heartbeat.py @@ -7,38 +7,67 @@ LOGGER = logging.getLogger(__name__) class HeartbeatChecker(object): - """Checks to make sure that our heartbeat is received at the expected - intervals. + """Sends heartbeats to the broker. The provided timeout is used to + determine if the connection is stale - no received heartbeats or + other activity will close the connection. See the parameter list for more + details. 
""" - DEFAULT_INTERVAL = 60 - MAX_IDLE_COUNT = 2 _CONNECTION_FORCED = 320 - _STALE_CONNECTION = "Too Many Missed Heartbeats, No reply in %i seconds" + _STALE_CONNECTION = "No activity or too many missed meartbeats in the last %i seconds" - def __init__(self, connection, interval=DEFAULT_INTERVAL, idle_count=MAX_IDLE_COUNT): - """Create a heartbeat on connection sending a heartbeat frame every - interval seconds. + def __init__(self, connection, timeout): + """Create an object that will check for activity on the provided + connection as well as receive heartbeat frames from the broker. The + timeout parameter defines a window within which this activity must + happen. If not, the connection is considered dead and closed. + + The value passed for timeout is also used to calculate an interval + at which a heartbeat frame is sent to the broker. The interval is + equal to the timeout value divided by two. :param pika.connection.Connection: Connection object - :param int interval: Heartbeat check interval. Note: heartbeats will - be sent at interval / 2 frequency. - :param int idle_count: The number of heartbeat intervals without data - received that will close the current connection. + :param int timeout: Connection idle timeout. If no activity occurs on the + connection nor heartbeat frames received during the + timeout window the connection will be closed. The + interval used to send heartbeats is calculated from + this value by dividing it by two. """ + if timeout < 1: + raise ValueError('timeout must >= 0, but got %r' % (timeout,)) + self._connection = connection - # Note: see the following document: + # Note: see the following documents: # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout - self._interval = float(interval / 2) - - # Note: even though we're sending heartbeats in half the specified - # interval, the broker will be sending them to us at the specified - # interval. This means we'll be checking for an idle connection - # twice as many times as the broker will send heartbeats to us, - # so we need to double the max idle count here - self._max_idle_count = idle_count * 2 + # https://github.com/pika/pika/pull/1072 + # https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion + # There is a certain amount of confusion around how client developers + # interpret the spec. The spec talks about 2 missed heartbeats as a + # *timeout*, plus that any activity on the connection counts for a + # heartbeat. This is to avoid edge cases and not to depend on network + # latency. + self._timeout = timeout + + self._send_interval = float(timeout) / 2 + + # Note: Pika will calculate the heartbeat / connectivity check interval + # by adding 5 seconds to the negotiated timeout to leave a bit of room + # for broker heartbeats that may be right at the edge of the timeout + # window. This is different behavior from the RabbitMQ Java client and + # the spec that suggests a check interval equivalent to two times the + # heartbeat timeout value. But, one advantage of adding a small amount + # is that bad connections will be detected faster. 
+        # https://github.com/pika/pika/pull/1072#issuecomment-397850795
+        # https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780
+        # https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192
+        self._check_interval = timeout + 5
+
+        LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',
+                     self._timeout,
+                     self._send_interval,
+                     self._check_interval)

         # Initialize counters
         self._bytes_received = 0
@@ -47,21 +76,10 @@ class HeartbeatChecker(object):
         self._heartbeat_frames_sent = 0
         self._idle_byte_intervals = 0

-        # The handle for the last timer
-        self._timer = None
-
-        # Setup the timer to fire in _interval seconds
-        self._setup_timer()
-
-    @property
-    def active(self):
-        """Return True if the connection's heartbeat attribute is set to this
-        instance.
-
-        :rtype True
-
-        """
-        return self._connection.heartbeat is self
+        self._send_timer = None
+        self._check_timer = None
+        self._start_send_timer()
+        self._start_check_timer()

     @property
     def bytes_received_on_connection(self):
@@ -78,74 +96,78 @@ class HeartbeatChecker(object):
         to trip the max idle threshold.

         """
-        return self._idle_byte_intervals >= self._max_idle_count
+        return self._idle_byte_intervals > 0

     def received(self):
         """Called when a heartbeat is received"""
         LOGGER.debug('Received heartbeat frame')
         self._heartbeat_frames_received += 1

-    def send_and_check(self):
-        """Invoked by a timer to send a heartbeat when we need to, check to see
+    def _send_heartbeat(self):
+        """Invoked by a timer to send a heartbeat when we need to.
+
+        """
+        LOGGER.debug('Sending heartbeat frame')
+        self._send_heartbeat_frame()
+        self._start_send_timer()
+
+    def _check_heartbeat(self):
+        """Invoked by a timer to check for broker heartbeats. Checks to see
         if we've missed any heartbeats and disconnect our connection if it's
         been idle too long.

         """
+        if self._has_received_data:
+            self._idle_byte_intervals = 0
+        else:
+            # Connection has not received any data, increment the counter
+            self._idle_byte_intervals += 1
+
         LOGGER.debug('Received %i heartbeat frames, sent %i, '
-                     'idle intervals %i, max idle count %i',
+                     'idle intervals %i',
                      self._heartbeat_frames_received,
                      self._heartbeat_frames_sent,
-                     self._idle_byte_intervals,
-                     self._max_idle_count)

+                     self._idle_byte_intervals)

         if self.connection_is_idle:
-            return self._close_connection()
-
-        # Connection has not received any data, increment the counter
-        if not self._has_received_data:
-            self._idle_byte_intervals += 1
-        else:
-            self._idle_byte_intervals = 0
+            self._close_connection()
+            return

-        # Update the counters of bytes sent/received and the frames received
-        self._update_counters()
-
-        # Send a heartbeat frame
-        self._send_heartbeat_frame()
-
-        # Update the timer to fire again
-        self._start_timer()
+        self._start_check_timer()

     def stop(self):
         """Stop the heartbeat checker"""
-        if self._timer:
-            LOGGER.debug('Removing timeout for next heartbeat interval')
-            self._connection.remove_timeout(self._timer)
-            self._timer = None
+        if self._send_timer:
+            LOGGER.debug('Removing timer for next heartbeat send interval')
+            self._connection.remove_timeout(self._send_timer)  # pylint: disable=W0212
+            self._send_timer = None
+        if self._check_timer:
+            LOGGER.debug('Removing timer for next heartbeat check interval')
+            self._connection.remove_timeout(self._check_timer)  # pylint: disable=W0212
+            self._check_timer = None

     def _close_connection(self):
         """Close the connection with the AMQP Connection-Forced value."""
         LOGGER.info('Connection is idle, %i stale byte intervals',
                     self._idle_byte_intervals)
-        duration = self._max_idle_count * self._interval
-        text = HeartbeatChecker._STALE_CONNECTION % duration
+        text = HeartbeatChecker._STALE_CONNECTION % self._timeout

         # NOTE: this won't achieve the perceived effect of sending
         # Connection.Close to broker, because the frame will only get buffered
         # in memory before the next statement terminates the connection.
         self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text)

-        self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED,
+        self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED,  # pylint: disable=W0212
                                        text)

     @property
     def _has_received_data(self):
-        """Returns True if the connection has received data on the connection.
+        """Returns True if the connection has received data.

         :rtype: bool

         """
-        return not self._bytes_received == self.bytes_received_on_connection
+        return self._bytes_received != self.bytes_received_on_connection

     @staticmethod
     def _new_heartbeat_frame():
@@ -161,25 +183,27 @@ class HeartbeatChecker(object):

         """
         LOGGER.debug('Sending heartbeat frame')
-        self._connection._send_frame(self._new_heartbeat_frame())
+        self._connection._send_frame(  # pylint: disable=W0212
+            self._new_heartbeat_frame())
         self._heartbeat_frames_sent += 1

-    def _setup_timer(self):
-        """Use the connection objects delayed_call function which is
-        implemented by the Adapter for calling the check_heartbeats function
-        every interval seconds.
-
-        """
-        self._timer = self._connection.add_timeout(self._interval,
-                                                   self.send_and_check)
-
-    def _start_timer(self):
-        """If the connection still has this object set for heartbeats, add a
-        new timer.
+    def _start_send_timer(self):
+        """Start a new heartbeat send timer."""
+        self._send_timer = self._connection.add_timeout(  # pylint: disable=W0212
+            self._send_interval,
+            self._send_heartbeat)
+
+    def _start_check_timer(self):
+        """Start a new heartbeat check timer."""
+        # Note: update counters now to get current values
+        # at the start of the timeout window. Values will be
+        # checked against the connection's byte count at the
+        # end of the window
+        self._update_counters()

-        """
-        if self.active:
-            self._setup_timer()
+        self._check_timer = self._connection.add_timeout(  # pylint: disable=W0212
+            self._check_interval,
+            self._check_heartbeat)

     def _update_counters(self):
         """Update the internal counters for bytes sent and received and the
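A minimal standalone sketch of the two-timer pattern this patch introduces, using threading.Timer in place of the connection-level add_timeout, with all callback names invented for illustration (this is not pika's API):

```python
import threading

class TwoTimerHeartbeat(object):
    """Illustrative sketch: send a heartbeat every timeout/2 seconds,
    and check for peer activity once every timeout+5 seconds."""

    def __init__(self, timeout, send_frame, peer_was_active, close_connection):
        self._send_interval = float(timeout) / 2
        self._check_interval = timeout + 5
        self._send_frame = send_frame            # callable: emit one heartbeat frame
        self._peer_was_active = peer_was_active  # callable: bytes seen this window?
        self._close_connection = close_connection
        self._start_send_timer()
        self._start_check_timer()

    def _start_send_timer(self):
        timer = threading.Timer(self._send_interval, self._send)
        timer.daemon = True
        timer.start()

    def _start_check_timer(self):
        timer = threading.Timer(self._check_interval, self._check)
        timer.daemon = True
        timer.start()

    def _send(self):
        # Sending is unconditional and never closes the connection.
        self._send_frame()
        self._start_send_timer()

    def _check(self):
        if self._peer_was_active():
            self._start_check_timer()  # traffic seen: open a fresh window
        else:
            self._close_connection()   # one fully idle window is fatal
```

The design point mirrored from the patch: the send path and the check path are fully decoupled, so sending frequency no longer determines how quickly an idle connection is detected.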
pika/pika
107fb0fd7028250fda0d8f901b65c93a91d7cb82
diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py
index f0431c2..71fa552 100644
--- a/tests/unit/heartbeat_tests.py
+++ b/tests/unit/heartbeat_tests.py
@@ -8,11 +8,11 @@ import mock

 from pika import connection, frame, heartbeat

-
 class HeartbeatTests(unittest.TestCase):

     INTERVAL = 60
-    HALF_INTERVAL = INTERVAL / 2
+    SEND_INTERVAL = float(INTERVAL) / 2
+    CHECK_INTERVAL = INTERVAL + 5

     def setUp(self):
         self.mock_conn = mock.Mock(spec=connection.Connection)
@@ -25,23 +25,26 @@ class HeartbeatTests(unittest.TestCase):
         del self.obj
         del self.mock_conn

-    def test_default_initialization_interval(self):
-        self.assertEqual(self.obj._interval, self.HALF_INTERVAL)
-
-    def test_default_initialization_max_idle_count(self):
-        self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2)
-
     def test_constructor_assignment_connection(self):
         self.assertIs(self.obj._connection, self.mock_conn)

-    def test_constructor_assignment_heartbeat_interval(self):
-        self.assertEqual(self.obj._interval, self.HALF_INTERVAL)
+    def test_constructor_assignment_intervals(self):
+        self.assertEqual(self.obj._send_interval, self.SEND_INTERVAL)
+        self.assertEqual(self.obj._check_interval, self.CHECK_INTERVAL)

     def test_constructor_initial_bytes_received(self):
-        self.assertEqual(self.obj._bytes_received, 0)
+        # Note: _bytes_received is initialized by calls
+        # to _start_check_timer which calls _update_counters
+        # which reads the initial values from the connection
+        self.assertEqual(self.obj._bytes_received,
+                         self.mock_conn.bytes_received)

     def test_constructor_initial_bytes_sent(self):
-        self.assertEqual(self.obj._bytes_received, 0)
+        # Note: _bytes_received is initialized by calls
+        # to _start_check_timer which calls _update_counters
+        # which reads the initial values from the connection
+        self.assertEqual(self.obj._bytes_sent,
+                         self.mock_conn.bytes_sent)

     def test_constructor_initial_heartbeat_frames_received(self):
         self.assertEqual(self.obj._heartbeat_frames_received, 0)
@@ -52,18 +55,15 @@ class HeartbeatTests(unittest.TestCase):
     def test_constructor_initial_idle_byte_intervals(self):
         self.assertEqual(self.obj._idle_byte_intervals, 0)

-    @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
-    def test_constructor_called_setup_timer(self, timer):
-        heartbeat.HeartbeatChecker(self.mock_conn)
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+    def test_constructor_called_start_send_timer(self, timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
         timer.assert_called_once_with()

-    def test_active_true(self):
-        self.mock_conn.heartbeat = self.obj
-        self.assertTrue(self.obj.active)
-
-    def test_active_false(self):
-        self.mock_conn.heartbeat = mock.Mock()
-        self.assertFalse(self.obj.active)
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')
+    def test_constructor_called_start_check_timer(self, timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        timer.assert_called_once_with()

     def test_bytes_received_on_connection(self):
         self.mock_conn.bytes_received = 128
@@ -81,54 +81,63 @@ class HeartbeatTests(unittest.TestCase):
         self.assertTrue(self.obj._heartbeat_frames_received, 1)

     @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
-    def test_send_and_check_not_closed(self, close_connection):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
+    def test_send_heartbeat_not_closed(self, close_connection):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        obj._send_heartbeat()
         close_connection.assert_not_called()

     @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
-    def test_send_and_check_missed_bytes(self, close_connection):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
+    def test_check_heartbeat_not_closed(self, close_connection):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        self.mock_conn.bytes_received = 128
+        obj._check_heartbeat()
+        close_connection.assert_not_called()
+
+    @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
+    def test_check_heartbeat_missed_bytes(self, close_connection):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
         obj._idle_byte_intervals = self.INTERVAL
-        obj.send_and_check()
+        obj._check_heartbeat()
         close_connection.assert_called_once_with()

-    def test_send_and_check_increment_no_bytes(self):
+    def test_check_heartbeat_increment_no_bytes(self):
         self.mock_conn.bytes_received = 100
         self.obj._bytes_received = 100
-        self.obj.send_and_check()
+        self.obj._check_heartbeat()
         self.assertEqual(self.obj._idle_byte_intervals, 1)

-    def test_send_and_check_increment_bytes(self):
+    def test_check_heartbeat_increment_bytes(self):
         self.mock_conn.bytes_received = 100
         self.obj._bytes_received = 128
-        self.obj.send_and_check()
+        self.obj._check_heartbeat()
         self.assertEqual(self.obj._idle_byte_intervals, 0)

     @mock.patch('pika.heartbeat.HeartbeatChecker._update_counters')
-    def test_send_and_check_update_counters(self, update_counters):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
+    def test_check_heartbeat_update_counters(self, update_counters):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
         update_counters.assert_called_once_with()

     @mock.patch('pika.heartbeat.HeartbeatChecker._send_heartbeat_frame')
-    def test_send_and_check_send_heartbeat_frame(self, send_heartbeat_frame):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
+    def test_send_heartbeat_sends_heartbeat_frame(self, send_heartbeat_frame):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        obj._send_heartbeat()
         send_heartbeat_frame.assert_called_once_with()

-    @mock.patch('pika.heartbeat.HeartbeatChecker._start_timer')
-    def test_send_and_check_start_timer(self, start_timer):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
-        start_timer.assert_called_once_with()
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+    def test_send_heartbeat_start_timer(self, start_send_timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        start_send_timer.assert_called_once_with()
+
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')
+    def test_check_heartbeat_start_timer(self, start_check_timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        start_check_timer.assert_called_once_with()

     def test_connection_close(self):
         self.obj._idle_byte_intervals = 3
         self.obj._idle_heartbeat_intervals = 4
         self.obj._close_connection()
-        reason = self.obj._STALE_CONNECTION % (
-            self.obj._max_idle_count * self.obj._interval)
+        reason = self.obj._STALE_CONNECTION % self.obj._timeout
         self.mock_conn.close.assert_called_once_with(
             self.obj._CONNECTION_FORCED, reason)
         self.mock_conn._on_terminate.assert_called_once_with(
@@ -157,20 +166,17 @@ class HeartbeatTests(unittest.TestCase):
         self.obj._send_heartbeat_frame()
         self.assertEqual(self.obj._heartbeat_frames_sent, 1)

-    def test_setup_timer_called(self):
-        self.mock_conn.add_timeout.assert_called_once_with(
-            self.HALF_INTERVAL,
-            self.obj.send_and_check)
-
-    @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
-    def test_start_timer_not_active(self, setup_timer):
-        self.obj._start_timer()
-        setup_timer.assert_not_called()
+    def test_start_send_timer_called(self):
+        want = [mock.call(self.SEND_INTERVAL, self.obj._send_heartbeat),
+                mock.call(self.CHECK_INTERVAL, self.obj._check_heartbeat)]
+        got = self.mock_conn.add_timeout.call_args_list
+        self.assertEqual(got, want)

-    @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
-    def test_start_timer_active(self, setup_timer):
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+    def test_start_timer_active(self, setup_send_timer):
         self.mock_conn.heartbeat = self.obj
-        self.obj._start_timer()
-        self.assertTrue(setup_timer.called)
+        self.obj._start_send_timer()
+        self.assertTrue(setup_send_timer.called)

     def test_update_counters_bytes_received(self):
         self.mock_conn.bytes_received = 256
HeartbeatChecker is confused about heartbeat timeouts

cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.

The `HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. The `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and the default `idle_count`. So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. `HeartbeatChecker` will therefore emit heartbeats to the broker only once every 600 seconds, and it will detect a heartbeat timeout only after 1200 seconds. So, in the event that receipt of a heartbeat by the broker is slightly delayed (and in the absence of any other AMQP frames from the client), the broker can erroneously conclude that the connection with the client is lost and prematurely close the connection.

This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity, and it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.

I see two problems here:

1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does the RabbitMQ broker send heartbeats within one heartbeat timeout interval?)
2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. Its constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
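To make the arithmetic in the report concrete, here is a plain-Python sketch of the timings for a 600-second negotiated timeout, contrasting the behavior described above with the scheme the accompanying patch adopts (the variable names are ours, for illustration only):

```python
heartbeat_timeout = 600  # negotiated heartbeat timeout, in seconds

# Behavior described in the report: interval == timeout, idle_count == 2
old_send_interval = heartbeat_timeout        # heartbeat emitted every 600 s
old_detect_after = 2 * heartbeat_timeout     # client flags a timeout at 1200 s

# Scheme adopted by the patch: send twice per window, check once per window
new_send_interval = heartbeat_timeout / 2.0  # heartbeat emitted every 300 s
new_check_interval = heartbeat_timeout + 5   # client checks for activity at 605 s

print(old_send_interval, old_detect_after)    # 600 1200
print(new_send_interval, new_check_interval)  # 300.0 605
```

Sending only once per 600-second window leaves no margin for network delay on the broker side, which is exactly the premature-close scenario the report describes.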
0.0
107fb0fd7028250fda0d8f901b65c93a91d7cb82
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_check_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_sends_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_send_timer_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_send_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_no_bytes" ]
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-06-09 18:20:40+00:00
bsd-3-clause
4,546
pika__pika-1072
diff --git a/examples/consume.py b/examples/consume.py
index e4f86de..7499934 100644
--- a/examples/consume.py
+++ b/examples/consume.py
@@ -1,3 +1,4 @@
+"""Basic message consumer example"""
 import functools
 import logging
 import pika
@@ -8,26 +9,37 @@ LOGGER = logging.getLogger(__name__)
 logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)

-def on_message(channel, method_frame, header_frame, body, userdata=None):
-    LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body))
-    channel.basic_ack(delivery_tag=method_frame.delivery_tag)
-credentials = pika.PlainCredentials('guest', 'guest')
-parameters = pika.ConnectionParameters('localhost', credentials=credentials)
-connection = pika.BlockingConnection(parameters)
+def on_message(chan, method_frame, _header_frame, body, userdata=None):
+    """Called when a message is received. Log message and ack it."""
+    LOGGER.info('Userdata: %s Message body: %s', userdata, body)
+    chan.basic_ack(delivery_tag=method_frame.delivery_tag)

-channel = connection.channel()
-channel.exchange_declare(exchange='test_exchange', exchange_type='direct', passive=False, durable=True, auto_delete=False)
-channel.queue_declare(queue='standard', auto_delete=True)
-channel.queue_bind(queue='standard', exchange='test_exchange', routing_key='standard_key')
-channel.basic_qos(prefetch_count=1)
+def main():
+    """Main method."""
+    credentials = pika.PlainCredentials('guest', 'guest')
+    parameters = pika.ConnectionParameters('localhost', credentials=credentials)
+    connection = pika.BlockingConnection(parameters)

-on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
-channel.basic_consume('standard', on_message_callback)
+    channel = connection.channel()
+    channel.exchange_declare(exchange='test_exchange',
+                             exchange_type='direct',
+                             passive=False,
+                             durable=True,
+                             auto_delete=False)
+    channel.queue_declare(queue='standard', auto_delete=True)
+    channel.queue_bind(queue='standard', exchange='test_exchange', routing_key='standard_key')
+    channel.basic_qos(prefetch_count=1)

-try:
-    channel.start_consuming()
-except KeyboardInterrupt:
-    channel.stop_consuming()
+    on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
+    channel.basic_consume('standard', on_message_callback)

-connection.close()
+    try:
+        channel.start_consuming()
+    except KeyboardInterrupt:
+        channel.stop_consuming()
+
+    connection.close()
+
+if __name__ == '__main__':
+    main()
diff --git a/pika/connection.py b/pika/connection.py
index 159e923..4b328b4 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -1402,7 +1402,7 @@ class Connection(pika.compat.AbstractBase):
         self._backpressure_multiplier = value

     #
-    # Connections state properties
+    # Connection state properties
     #

     @property
diff --git a/pika/heartbeat.py b/pika/heartbeat.py
index 73a6df8..d678978 100644
--- a/pika/heartbeat.py
+++ b/pika/heartbeat.py
@@ -8,38 +8,66 @@ LOGGER = logging.getLogger(__name__)

 class HeartbeatChecker(object):
-    """Checks to make sure that our heartbeat is received at the expected
-    intervals.
+    """Sends heartbeats to the broker. The provided timeout is used to
+    determine if the connection is stale - no received heartbeats or
+    other activity will close the connection. See the parameter list for more
+    details.

     """
-    DEFAULT_INTERVAL = 60
-    MAX_IDLE_COUNT = 2
+    _STALE_CONNECTION = "No activity or too many missed meartbeats in the last %i seconds"

-    _STALE_CONNECTION = "Too Many Missed Heartbeats, No reply in %i seconds"
+    def __init__(self, connection, timeout):
+        """Create an object that will check for activity on the provided
+        connection as well as receive heartbeat frames from the broker. The
+        timeout parameter defines a window within which this activity must
+        happen. If not, the connection is considered dead and closed.

-    def __init__(self, connection, interval=DEFAULT_INTERVAL, idle_count=MAX_IDLE_COUNT):
-        """Create a heartbeat on connection sending a heartbeat frame every
-        interval seconds.
+        The value passed for timeout is also used to calculate an interval
+        at which a heartbeat frame is sent to the broker. The interval is
+        equal to the timeout value divided by two.

         :param pika.connection.Connection: Connection object
-        :param int interval: Heartbeat check interval. Note: heartbeats will
-                             be sent at interval / 2 frequency.
-        :param int idle_count: The number of heartbeat intervals without data
-                               received that will close the current connection.
+        :param int timeout: Connection idle timeout. If no activity occurs on the
+                            connection nor heartbeat frames received during the
+                            timeout window the connection will be closed. The
+                            interval used to send heartbeats is calculated from
+                            this value by dividing it by two.

         """
+        if timeout < 1:
+            raise ValueError('timeout must >= 0, but got %r' % (timeout,))
+
         self._connection = connection

-        # Note: see the following document:
+        # Note: see the following documents:
         # https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
-        self._interval = float(interval / 2)
-
-        # Note: even though we're sending heartbeats in half the specified
-        # interval, the broker will be sending them to us at the specified
-        # interval. This means we'll be checking for an idle connection
-        # twice as many times as the broker will send heartbeats to us,
-        # so we need to double the max idle count here
-        self._max_idle_count = idle_count * 2
+        # https://github.com/pika/pika/pull/1072
+        # https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion
+        # There is a certain amount of confusion around how client developers
+        # interpret the spec. The spec talks about 2 missed heartbeats as a
+        # *timeout*, plus that any activity on the connection counts for a
+        # heartbeat. This is to avoid edge cases and not to depend on network
+        # latency.
+        self._timeout = timeout
+
+        self._send_interval = float(timeout) / 2
+
+        # Note: Pika will calculate the heartbeat / connectivity check interval
+        # by adding 5 seconds to the negotiated timeout to leave a bit of room
+        # for broker heartbeats that may be right at the edge of the timeout
+        # window. This is different behavior from the RabbitMQ Java client and
+        # the spec that suggests a check interval equivalent to two times the
+        # heartbeat timeout value. But, one advantage of adding a small amount
+        # is that bad connections will be detected faster.
+        # https://github.com/pika/pika/pull/1072#issuecomment-397850795
+        # https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780
+        # https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192
+        self._check_interval = timeout + 5
+
+        LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',
+                     self._timeout,
+                     self._send_interval,
+                     self._check_interval)

         # Initialize counters
         self._bytes_received = 0
@@ -48,21 +76,10 @@ class HeartbeatChecker(object):
         self._heartbeat_frames_sent = 0
         self._idle_byte_intervals = 0

-        # The handle for the last timer
-        self._timer = None
-
-        # Setup the timer to fire in _interval seconds
-        self._setup_timer()
-
-    @property
-    def active(self):
-        """Return True if the connection's heartbeat attribute is set to this
-        instance.
-
-        :rtype True
-
-        """
-        return self._connection._heartbeat_checker is self
+        self._send_timer = None
+        self._check_timer = None
+        self._start_send_timer()
+        self._start_check_timer()

     @property
     def bytes_received_on_connection(self):
@@ -79,58 +96,61 @@ class HeartbeatChecker(object):
         to trip the max idle threshold.

         """
-        return self._idle_byte_intervals >= self._max_idle_count
+        return self._idle_byte_intervals > 0

     def received(self):
         """Called when a heartbeat is received"""
         LOGGER.debug('Received heartbeat frame')
         self._heartbeat_frames_received += 1

-    def send_and_check(self):
-        """Invoked by a timer to send a heartbeat when we need to, check to see
+    def _send_heartbeat(self):
+        """Invoked by a timer to send a heartbeat when we need to.
+
+        """
+        LOGGER.debug('Sending heartbeat frame')
+        self._send_heartbeat_frame()
+        self._start_send_timer()
+
+    def _check_heartbeat(self):
+        """Invoked by a timer to check for broker heartbeats. Checks to see
         if we've missed any heartbeats and disconnect our connection if it's
         been idle too long.

         """
+        if self._has_received_data:
+            self._idle_byte_intervals = 0
+        else:
+            # Connection has not received any data, increment the counter
+            self._idle_byte_intervals += 1
+
         LOGGER.debug('Received %i heartbeat frames, sent %i, '
-                     'idle intervals %i, max idle count %i',
+                     'idle intervals %i',
                      self._heartbeat_frames_received,
                      self._heartbeat_frames_sent,
-                     self._idle_byte_intervals,
-                     self._max_idle_count)
+                     self._idle_byte_intervals)

         if self.connection_is_idle:
             self._close_connection()
             return

-        # Connection has not received any data, increment the counter
-        if not self._has_received_data:
-            self._idle_byte_intervals += 1
-        else:
-            self._idle_byte_intervals = 0
-
-        # Update the counters of bytes sent/received and the frames received
-        self._update_counters()
-
-        # Send a heartbeat frame
-        self._send_heartbeat_frame()
-
-        # Update the timer to fire again
-        self._start_timer()
+        self._start_check_timer()

     def stop(self):
         """Stop the heartbeat checker"""
-        if self._timer:
-            LOGGER.debug('Removing timeout for next heartbeat interval')
-            self._connection._adapter_remove_timeout(self._timer)  # pylint: disable=W0212
-            self._timer = None
+        if self._send_timer:
+            LOGGER.debug('Removing timer for next heartbeat send interval')
+            self._connection._adapter_remove_timeout(self._send_timer)  # pylint: disable=W0212
+            self._send_timer = None
+        if self._check_timer:
+            LOGGER.debug('Removing timer for next heartbeat check interval')
+            self._connection._adapter_remove_timeout(self._check_timer)  # pylint: disable=W0212
+            self._check_timer = None

     def _close_connection(self):
         """Close the connection with the AMQP Connection-Forced value."""
         LOGGER.info('Connection is idle, %i stale byte intervals',
                     self._idle_byte_intervals)
-        duration = self._max_idle_count * self._interval
-        text = HeartbeatChecker._STALE_CONNECTION % duration
+        text = HeartbeatChecker._STALE_CONNECTION % self._timeout

         # Abort the stream connection. There is no point trying to gracefully
         # close the AMQP connection since lack of heartbeat suggests that the
@@ -140,12 +160,12 @@ class HeartbeatChecker(object):

     @property
     def _has_received_data(self):
-        """Returns True if the connection has received data on the connection.
+        """Returns True if the connection has received data.

         :rtype: bool

         """
-        return not self._bytes_received == self.bytes_received_on_connection
+        return self._bytes_received != self.bytes_received_on_connection

     @staticmethod
     def _new_heartbeat_frame():
@@ -165,23 +185,23 @@ class HeartbeatChecker(object):
             self._new_heartbeat_frame())
         self._heartbeat_frames_sent += 1

-    def _setup_timer(self):
-        """Use the connection objects delayed_call function which is
-        implemented by the Adapter for calling the check_heartbeats function
-        every interval seconds.
-
-        """
-        self._timer = self._connection._adapter_add_timeout(  # pylint: disable=W0212
-            self._interval,
-            self.send_and_check)
-
-    def _start_timer(self):
-        """If the connection still has this object set for heartbeats, add a
-        new timer.
+    def _start_send_timer(self):
+        """Start a new heartbeat send timer."""
+        self._send_timer = self._connection._adapter_add_timeout(  # pylint: disable=W0212
+            self._send_interval,
+            self._send_heartbeat)
+
+    def _start_check_timer(self):
+        """Start a new heartbeat check timer."""
+        # Note: update counters now to get current values
+        # at the start of the timeout window. Values will be
+        # checked against the connection's byte count at the
+        # end of the window
+        self._update_counters()

-        """
-        if self.active:
-            self._setup_timer()
+        self._check_timer = self._connection._adapter_add_timeout(  # pylint: disable=W0212
+            self._check_interval,
+            self._check_heartbeat)

     def _update_counters(self):
         """Update the internal counters for bytes sent and received and the
pika/pika
4c904dea651caaf2a54b0fca0b9e908dec18a4f8
diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py
index 1cb1160..983bac0 100644
--- a/tests/unit/heartbeat_tests.py
+++ b/tests/unit/heartbeat_tests.py
@@ -43,49 +43,53 @@ class ConstructableConnection(connection.Connection):
     def _adapter_get_write_buffer_size(self):
         raise NotImplementedError

-    def _adapter_add_callback_threadsafe(self):
+    def _adapter_add_callback_threadsafe(self, callback):
         raise NotImplementedError

-    def _adapter_add_timeout(self):
+    def _adapter_add_timeout(self, deadline, callback):
         raise NotImplementedError

-    def _adapter_remove_timeout(self):
+    def _adapter_remove_timeout(self, timeout_id):
         raise NotImplementedError


 class HeartbeatTests(unittest.TestCase):

     INTERVAL = 60
-    HALF_INTERVAL = INTERVAL / 2
+    SEND_INTERVAL = float(INTERVAL) / 2
+    CHECK_INTERVAL = INTERVAL + 5

     def setUp(self):
         self.mock_conn = mock.Mock(spec_set=ConstructableConnection())
         self.mock_conn.bytes_received = 100
         self.mock_conn.bytes_sent = 100
         self.mock_conn._heartbeat_checker = mock.Mock(spec=heartbeat.HeartbeatChecker)
-        self.obj = heartbeat.HeartbeatChecker(self.mock_conn)
+        self.obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)

     def tearDown(self):
         del self.obj
         del self.mock_conn

-    def test_default_initialization_interval(self):
-        self.assertEqual(self.obj._interval, self.HALF_INTERVAL)
-
-    def test_default_initialization_max_idle_count(self):
-        self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2)
-
     def test_constructor_assignment_connection(self):
         self.assertIs(self.obj._connection, self.mock_conn)

-    def test_constructor_assignment_heartbeat_interval(self):
-        self.assertEqual(self.obj._interval, self.HALF_INTERVAL)
+    def test_constructor_assignment_intervals(self):
+        self.assertEqual(self.obj._send_interval, self.SEND_INTERVAL)
+        self.assertEqual(self.obj._check_interval, self.CHECK_INTERVAL)

     def test_constructor_initial_bytes_received(self):
-        self.assertEqual(self.obj._bytes_received, 0)
+        # Note: _bytes_received is initialized by calls
+        # to _start_check_timer which calls _update_counters
+        # which reads the initial values from the connection
+        self.assertEqual(self.obj._bytes_received,
+                         self.mock_conn.bytes_received)

     def test_constructor_initial_bytes_sent(self):
-        self.assertEqual(self.obj._bytes_received, 0)
+        # Note: _bytes_received is initialized by calls
+        # to _start_check_timer which calls _update_counters
+        # which reads the initial values from the connection
+        self.assertEqual(self.obj._bytes_sent,
+                         self.mock_conn.bytes_sent)

     def test_constructor_initial_heartbeat_frames_received(self):
         self.assertEqual(self.obj._heartbeat_frames_received, 0)
@@ -96,18 +100,15 @@ class HeartbeatTests(unittest.TestCase):
     def test_constructor_initial_idle_byte_intervals(self):
         self.assertEqual(self.obj._idle_byte_intervals, 0)

-    @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
-    def test_constructor_called_setup_timer(self, timer):
-        heartbeat.HeartbeatChecker(self.mock_conn)
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+    def test_constructor_called_start_send_timer(self, timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
         timer.assert_called_once_with()

-    def test_active_true(self):
-        self.mock_conn._heartbeat_checker = self.obj
-        self.assertTrue(self.obj.active)
-
-    def test_active_false(self):
-        self.mock_conn._heartbeat_checker = mock.Mock()
-        self.assertFalse(self.obj.active)
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')
+    def test_constructor_called_start_check_timer(self, timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        timer.assert_called_once_with()

     def test_bytes_received_on_connection(self):
         self.mock_conn.bytes_received = 128
@@ -125,54 +126,63 @@ class HeartbeatTests(unittest.TestCase):
         self.assertTrue(self.obj._heartbeat_frames_received, 1)

     @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
-    def test_send_and_check_not_closed(self, close_connection):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
+    def test_send_heartbeat_not_closed(self, close_connection):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        obj._send_heartbeat()
         close_connection.assert_not_called()

     @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
-    def test_send_and_check_missed_bytes(self, close_connection):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
+    def test_check_heartbeat_not_closed(self, close_connection):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        self.mock_conn.bytes_received = 128
+        obj._check_heartbeat()
+        close_connection.assert_not_called()
+
+    @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
+    def test_check_heartbeat_missed_bytes(self, close_connection):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
         obj._idle_byte_intervals = self.INTERVAL
-        obj.send_and_check()
+        obj._check_heartbeat()
         close_connection.assert_called_once_with()

-    def test_send_and_check_increment_no_bytes(self):
+    def test_check_heartbeat_increment_no_bytes(self):
         self.mock_conn.bytes_received = 100
         self.obj._bytes_received = 100
-        self.obj.send_and_check()
+        self.obj._check_heartbeat()
         self.assertEqual(self.obj._idle_byte_intervals, 1)

-    def test_send_and_check_increment_bytes(self):
+    def test_check_heartbeat_increment_bytes(self):
         self.mock_conn.bytes_received = 100
         self.obj._bytes_received = 128
-        self.obj.send_and_check()
+        self.obj._check_heartbeat()
         self.assertEqual(self.obj._idle_byte_intervals, 0)

     @mock.patch('pika.heartbeat.HeartbeatChecker._update_counters')
-    def test_send_and_check_update_counters(self, update_counters):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
+    def test_check_heartbeat_update_counters(self, update_counters):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
         update_counters.assert_called_once_with()

     @mock.patch('pika.heartbeat.HeartbeatChecker._send_heartbeat_frame')
-    def test_send_and_check_send_heartbeat_frame(self, send_heartbeat_frame):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
+    def test_send_heartbeat_sends_heartbeat_frame(self, send_heartbeat_frame):
+        obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        obj._send_heartbeat()
         send_heartbeat_frame.assert_called_once_with()

-    @mock.patch('pika.heartbeat.HeartbeatChecker._start_timer')
-    def test_send_and_check_start_timer(self, start_timer):
-        obj = heartbeat.HeartbeatChecker(self.mock_conn)
-        obj.send_and_check()
-        start_timer.assert_called_once_with()
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+    def test_send_heartbeat_start_timer(self, start_send_timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        start_send_timer.assert_called_once_with()
+
+    @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')
+    def test_check_heartbeat_start_timer(self, start_check_timer):
+        heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+        start_check_timer.assert_called_once_with()

     def test_connection_close(self):
         self.obj._idle_byte_intervals = 3
         self.obj._idle_heartbeat_intervals = 4
         self.obj._close_connection()
-        reason = self.obj._STALE_CONNECTION % (
-            self.obj._max_idle_count * self.obj._interval)
+        reason = self.obj._STALE_CONNECTION % self.obj._timeout

         self.mock_conn._terminate_stream.assert_called_once_with(mock.ANY)
         self.assertIsInstance(self.mock_conn._terminate_stream.call_args[0][0],
@@ -204,20 +214,11 @@ class HeartbeatTests(unittest.TestCase):
         self.obj._send_heartbeat_frame()
         self.assertEqual(self.obj._heartbeat_frames_sent, 1)

-    def test_setup_timer_called(self):
-        self.mock_conn._adapter_add_timeout.assert_called_once_with(
-            self.HALF_INTERVAL, self.obj.send_and_check)
-
-    @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
-    def test_start_timer_not_active(self, setup_timer):
-        self.obj._start_timer()
-        setup_timer.assert_not_called()
-
-    @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
-    def test_start_timer_active(self, setup_timer):
-        self.mock_conn._heartbeat_checker = self.obj
-        self.obj._start_timer()
-        self.assertTrue(setup_timer.called)
+    def test_start_send_timer_called(self):
+        want = [mock.call(self.SEND_INTERVAL, self.obj._send_heartbeat),
+                mock.call(self.CHECK_INTERVAL, self.obj._check_heartbeat)]
+        got = self.mock_conn._adapter_add_timeout.call_args_list
+        self.assertEqual(got, want)

     def test_update_counters_bytes_received(self):
         self.mock_conn.bytes_received = 256
HeartbeatChecker is confused about heartbeat timeouts

cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.

The `HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. The `Connection` class instantiates `HeartbeatChecker` with `interval=heartbeat_timeout` and the default `idle_count`. So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. `HeartbeatChecker` will therefore emit heartbeats to the broker only once every 600 seconds, and it will detect a heartbeat timeout only after 1200 seconds. So, in the event that receipt of a heartbeat by the broker is slightly delayed (and in the absence of any other AMQP frames from the client), the broker can erroneously conclude that the connection with the client is lost and prematurely close the connection.

This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity, and it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.

I see two problems here:

1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as `HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2)` or something like that (how often does the RabbitMQ broker send heartbeats within one heartbeat timeout interval?)
2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. Its constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
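A complementary sketch of the check-side bookkeeping both variants of the fix settle on: any bytes received during a check window reset the idle counter, and a single fully idle window marks the connection stale. Function and variable names here are illustrative, not pika's:

```python
def check_window(bytes_received_now, bytes_at_window_start, idle_byte_intervals):
    """Return (updated idle counter, should_close). Any received bytes
    reset the counter; one fully idle window trips the close."""
    if bytes_received_now != bytes_at_window_start:
        idle_byte_intervals = 0
    else:
        idle_byte_intervals += 1
    return idle_byte_intervals, idle_byte_intervals > 0

# A window with traffic, then a fully idle window:
counter, close = check_window(256, 100, 0)
assert (counter, close) == (0, False)
counter, close = check_window(256, 256, counter)
assert (counter, close) == (1, True)
```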
0.0
4c904dea651caaf2a54b0fca0b9e908dec18a4f8
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_check_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_sends_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_no_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_send_timer_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_send_timer" ]
[ "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-06-09 18:26:43+00:00
bsd-3-clause
4,547
pika__pika-1132
diff --git a/pika/connection.py b/pika/connection.py
index 477581b..15ba02d 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -9,8 +9,8 @@ import functools
 import logging
 import math
 import numbers
+import os
 import platform
-import socket
 import warnings
 import ssl

@@ -963,27 +963,64 @@ class URLParameters(Parameters):
         """Deserialize and apply the corresponding query string arg

         """
-        options = ast.literal_eval(value)
-        if options is None:
+        opts = ast.literal_eval(value)
+        if opts is None:
             if self.ssl_options is not None:
                 raise ValueError(
                     'Specified ssl_options=None URL arg is inconsistent with '
                     'the specified https URL scheme.')
         else:
-            # Convert options to pika.SSLOptions via ssl.SSLSocket()
-            sock = socket.socket()
-            try:
-                ssl_sock = ssl.SSLSocket(sock=sock, **options)
-                try:
-                    self.ssl_options = pika.SSLOptions(
-                        context=ssl_sock.context,
-                        server_hostname=ssl_sock.server_hostname)
-                finally:
-                    ssl_sock.close()
-            finally:
-                sock.close()
-
-
+            # Note: this is the deprecated wrap_socket signature and info:
+            #
+            # Internally, function creates a SSLContext with protocol
+            # ssl_version and SSLContext.options set to cert_reqs.
+            # If parameters keyfile, certfile, ca_certs or ciphers are set,
+            # then the values are passed to SSLContext.load_cert_chain(),
+            # SSLContext.load_verify_locations(), and SSLContext.set_ciphers().
+            #
+            # ssl.wrap_socket(sock,
+            #                 keyfile=None,
+            #                 certfile=None,
+            #                 server_side=False,            # Not URL-supported
+            #                 cert_reqs=CERT_NONE,          # Not URL-supported
+            #                 ssl_version=PROTOCOL_TLS,     # Not URL-supported
+            #                 ca_certs=None,
+            #                 do_handshake_on_connect=True, # Not URL-supported
+            #                 suppress_ragged_eofs=True,    # Not URL-supported
+            #                 ciphers=None
+            cxt = None
+            if 'ca_certs' in opts:
+                opt_ca_certs = opts['ca_certs']
+                if os.path.isfile(opt_ca_certs):
+                    cxt = ssl.create_default_context(cafile=opt_ca_certs)
+                elif os.path.isdir(opt_ca_certs):
+                    cxt = ssl.create_default_context(capath=opt_ca_certs)
+                else:
+                    LOGGER.warning('ca_certs is specified via ssl_options but '
+                                   'is neither a valid file nor directory: "%s"',
+                                   opt_ca_certs)
+
+            if 'certfile' in opts:
+                if os.path.isfile(opts['certfile']):
+                    keyfile = opts.get('keyfile')
+                    password = opts.get('password')
+                    cxt.load_cert_chain(opts['certfile'], keyfile, password)
+                else:
+                    LOGGER.warning('certfile is specified via ssl_options but '
+                                   'is not a valid file: "%s"',
+                                   opts['certfile'])
+
+            if 'ciphers' in opts:
+                opt_ciphers = opts['ciphers']
+                if opt_ciphers is not None:
+                    cxt.set_ciphers(opt_ciphers)
+                else:
+                    LOGGER.warning('ciphers specified in ssl_options but '
+                                   'evaluates to None')
+
+            server_hostname = opts.get('server_hostname')
+            self.ssl_options = pika.SSLOptions(context=cxt,
+                                               server_hostname=server_hostname)

     def _set_url_tcp_options(self, value):
         """Deserialize and apply the corresponding query string arg"""
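The replacement above maps the legacy wrap_socket-style keys onto an ssl.SSLContext instead of round-tripping through ssl.SSLSocket. A condensed sketch of the same mapping, with the simplifications noted in the comments (the helper name is ours, not pika's; pika.SSLOptions is the real constructor the patch feeds):

```python
import ssl

import pika

def ssl_options_from_opts(opts):
    """Build pika.SSLOptions from wrap_socket-style keys, roughly as the
    patched URLParameters does. Simplified: no ca_certs-directory branch,
    no missing-file warnings, no ciphers-is-None logging."""
    context = ssl.create_default_context(cafile=opts.get('ca_certs'))
    if opts.get('certfile'):
        # keyfile and password are optional, matching load_cert_chain()
        context.load_cert_chain(opts['certfile'],
                                opts.get('keyfile'),
                                opts.get('password'))
    if opts.get('ciphers'):
        context.set_ciphers(opts['ciphers'])
    return pika.SSLOptions(context=context,
                           server_hostname=opts.get('server_hostname'))
```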
pika/pika
e58f3a1995e9a9185cc13bd82732208a0604f881
diff --git a/tests/unit/connection_parameters_tests.py b/tests/unit/connection_parameters_tests.py
index 339734b..1ef464a 100644
--- a/tests/unit/connection_parameters_tests.py
+++ b/tests/unit/connection_parameters_tests.py
@@ -699,12 +699,10 @@ class URLParametersTests(ParametersTestsBase):
             # on <VerifyMode.CERT_NONE: 1>:
             # {'cert_reqs': <VerifyMode.CERT_NONE: 1>, 'server_hostname': 'blah.blah.com'}
             'ssl_options': {
-                'keyfile': None,
-                'certfile': None,
-                'ssl_version': int(ssl.PROTOCOL_SSLv23),
-                'ca_certs': None,
-                'cert_reqs': int(ssl.CERT_NONE),
-                'npn_protocols': None,
+                'ca_certs': '/etc/ssl',
+                'certfile': '/etc/certs/cert.pem',
+                'keyfile': '/etc/certs/key.pem',
+                'password': 'test123',
                 'ciphers': None,
                 'server_hostname': 'blah.blah.com'
             },
@@ -719,7 +717,7 @@ class URLParametersTests(ParametersTestsBase):
                 test_params['backpressure_detection'] = backpressure
             virtual_host = '/'
             query_string = urlencode(test_params)
-            test_url = ('https://myuser:mypass@www.test.com:5678/%s?%s' % (
+            test_url = ('amqps://myuser:mypass@www.test.com:5678/%s?%s' % (
                 url_quote(virtual_host, safe=''),
                 query_string,
             ))
@@ -733,8 +731,6 @@ class URLParametersTests(ParametersTestsBase):
                 actual_value = getattr(params, t_param)

                 if t_param == 'ssl_options':
-                    self.assertEqual(actual_value.context.verify_mode,
-                                     expected_value['cert_reqs'])
                     self.assertEqual(actual_value.server_hostname,
                                      expected_value['server_hostname'])
                 else:
@@ -749,6 +745,8 @@ class URLParametersTests(ParametersTestsBase):

         # check all values from base URL
         self.assertIsNotNone(params.ssl_options)
+        self.assertIsNotNone(params.ssl_options.context)
+        self.assertIsInstance(params.ssl_options.context, ssl.SSLContext)
         self.assertEqual(params.credentials.username, 'myuser')
         self.assertEqual(params.credentials.password, 'mypass')
         self.assertEqual(params.host, 'www.test.com')
Python 3.7.0 support

There appear to be incompatible changes in the `ssl` module. Travis CI should be updated as well.
0.0
e58f3a1995e9a9185cc13bd82732208a0604f881
[ "tests/unit/connection_parameters_tests.py::URLParametersTests::test_good_parameters" ]
[ "tests/unit/connection_parameters_tests.py::ParametersTests::test_backpressure_detection", "tests/unit/connection_parameters_tests.py::ParametersTests::test_blocked_connection_timeout", "tests/unit/connection_parameters_tests.py::ParametersTests::test_channel_max", "tests/unit/connection_parameters_tests.py::ParametersTests::test_connection_attempts", "tests/unit/connection_parameters_tests.py::ParametersTests::test_credentials", "tests/unit/connection_parameters_tests.py::ParametersTests::test_default_property_values", "tests/unit/connection_parameters_tests.py::ParametersTests::test_eq", "tests/unit/connection_parameters_tests.py::ParametersTests::test_frame_max", "tests/unit/connection_parameters_tests.py::ParametersTests::test_heartbeat", "tests/unit/connection_parameters_tests.py::ParametersTests::test_host", "tests/unit/connection_parameters_tests.py::ParametersTests::test_locale", "tests/unit/connection_parameters_tests.py::ParametersTests::test_ne", "tests/unit/connection_parameters_tests.py::ParametersTests::test_port", "tests/unit/connection_parameters_tests.py::ParametersTests::test_retry_delay", "tests/unit/connection_parameters_tests.py::ParametersTests::test_socket_timeout", "tests/unit/connection_parameters_tests.py::ParametersTests::test_ssl_options", "tests/unit/connection_parameters_tests.py::ParametersTests::test_tcp_options", "tests/unit/connection_parameters_tests.py::ParametersTests::test_virtual_host", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_bad_type_connection_parameters", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_callable_heartbeat", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_default_property_values", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_deprecated_heartbeat_interval", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_exlicit_none_socket_timeout", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_exlicit_none_stack_timeout", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_non_ssl_with_default_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_non_ssl_with_explict_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_ssl_with_default_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_ssl_with_explict_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_good_connection_parameters", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accept_plain_string_locale", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accept_unicode_locale", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accepts_plain_string_virtualhost", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accepts_unicode_string_virtualhost", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_accepts_blank_username_and_password", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_accepts_plain_string", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_accepts_unicode_string", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_default_property_values", 
"tests/unit/connection_parameters_tests.py::URLParametersTests::test_deprecated_heartbeat_interval", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_no_ssl", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_no_url_scheme_defaults_to_plaintext", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_ssl", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_url_decodes_username_and_password", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_port_if_not_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_username_and_password_if_not_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_if_not_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_if_only_parameters_provided", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_if_only_slash_is_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_downcase", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_downcase_ending_with_slash", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_upcase", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_upcase_ending_with_slash" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2018-10-29 23:20:07+00:00
bsd-3-clause
4,548
pika__pika-1230
diff --git a/pika/connection.py b/pika/connection.py
index e5f3cfa..aeddae3 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -1995,7 +1995,8 @@ class Connection(pika.compat.AbstractBase):
         """
         LOGGER.info(
             'AMQP stack terminated, failed to connect, or aborted: '
-            'error-arg=%r; pending-error=%r', error, self._error)
+            'opened=%r, error-arg=%r; pending-error=%r',
+            self._opened, error, self._error)

         if error is not None:
             if self._error is not None:
@@ -2016,11 +2017,11 @@ class Connection(pika.compat.AbstractBase):
                 [spec.Connection.Close, spec.Connection.Start])

             if self.params.blocked_connection_timeout is not None:
-                self._remove_callbacks(
-                    0, [spec.Connection.Blocked, spec.Connection.Unblocked])
+                self._remove_callbacks(0,
+                    [spec.Connection.Blocked, spec.Connection.Unblocked])

         if not self._opened and isinstance(self._error,
-                                           exceptions.StreamLostError):
+                (exceptions.StreamLostError, exceptions.ConnectionClosedByBroker)):
             # Heuristically deduce error based on connection state
             if self.connection_state == self.CONNECTION_PROTOCOL:
                 LOGGER.error('Probably incompatible Protocol Versions')
diff --git a/pika/data.py b/pika/data.py
index d4264d2..e50a7b2 100644
--- a/pika/data.py
+++ b/pika/data.py
@@ -273,9 +273,11 @@ def decode_value(encoded, offset):  # pylint: disable=R0912,R0915
         offset += 4
         value = decimal.Decimal(raw) * (decimal.Decimal(10)**-decimals)

-    # Short String
+    # https://github.com/pika/pika/issues/1205
+    # Short Signed Int
     elif kind == b's':
-        value, offset = decode_short_string(encoded, offset)
+        value = struct.unpack_from('>h', encoded, offset)[0]
+        offset += 2

     # Long String
     elif kind == b'S':
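The behavioral change in decode_value: field kind b's' is now read as a big-endian signed 16-bit integer (struct format '>h') rather than as a short string. A standalone check of that decoding, using the same field bytes as the new unit test:

```python
import struct

encoded = b's\x04\xd2'  # kind byte b's' followed by a two-byte payload
assert encoded[0:1] == b's'

# 0x04d2 big-endian == 1234, the value the new test expects
value = struct.unpack_from('>h', encoded, 1)[0]
assert value == 1234
```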
pika/pika
724d8905b4d2527063f793a535521a45ba745d36
diff --git a/tests/acceptance/async_adapter_tests.py b/tests/acceptance/async_adapter_tests.py
index 5c8d542..b451541 100644
--- a/tests/acceptance/async_adapter_tests.py
+++ b/tests/acceptance/async_adapter_tests.py
@@ -910,14 +910,14 @@ class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters):  # pylint: disable

 class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters):  # pylint: disable=C0103
-    DESCRIPTION = "Unknown vhost results in ConnectionClosedByBroker."
+    DESCRIPTION = "Unknown vhost results in ProbableAccessDeniedError."

     def start(self, *args, **kwargs):  # pylint: disable=W0221
         self.parameters.virtual_host = str(uuid.uuid4())
         self.error_captured = None
         super(TestZ_AccessDenied, self).start(*args, **kwargs)
         self.assertIsInstance(self.error_captured,
-                              pika.exceptions.ConnectionClosedByBroker)
+                              pika.exceptions.ProbableAccessDeniedError)

     def on_open_error(self, connection, error):
         self.error_captured = error
diff --git a/tests/unit/data_tests.py b/tests/unit/data_tests.py
index fccd0a9..db8601f 100644
--- a/tests/unit/data_tests.py
+++ b/tests/unit/data_tests.py
@@ -35,16 +35,19 @@ class DataTests(unittest.TestCase):
     FIELD_TBL_ENCODED += b'\x05bytesx\x00\x00\x00\x06foobar' if PY3 else b'\x05bytesS\x00\x00\x00\x06foobar'

     FIELD_TBL_VALUE = OrderedDict(
-        [('array', [1, 2, 3]), ('boolval', True), ('decimal',
-                                                   decimal.Decimal('3.14')),
-         ('decimal_too', decimal.Decimal('100')), ('dictval', {
-             'foo': 'bar'
-         }), ('intval', 1), ('bigint', 2592000000), ('longval',
-                                                     long(912598613)), ('null',
-                                                                        None),
-         ('strval', 'Test'), ('timestampval',
-                              datetime.datetime(2006, 11, 21, 16, 30,
-                                                10)), ('unicode', u'utf8=✓'),
+        [
+            ('array', [1, 2, 3]),
+            ('boolval', True),
+            ('decimal', decimal.Decimal('3.14')),
+            ('decimal_too', decimal.Decimal('100')),
+            ('dictval', { 'foo': 'bar' }),
+            ('intval', 1),
+            ('bigint', 2592000000),
+            ('longval', long(912598613)),
+            ('null', None),
+            ('strval', 'Test'),
+            ('timestampval', datetime.datetime(2006, 11, 21, 16, 30, 10)),
+            ('unicode', u'utf8=✓'),
             ('bytes', b'foobar'),
         ])

@@ -56,6 +59,15 @@ class DataTests(unittest.TestCase):
         result = data.decode_table(input, 0)
         self.assertEqual(result, ({'bytes': b'foobar'}, 21))

+    # b'\x08shortints\x04\xd2'
+    # ('shortint', 1234),
+    def test_decode_shortint(self):
+        input = (
+            b'\x00\x00\x00\x0c'
+            b'\x08shortints\x04\xd2'
+        )
+        result = data.decode_table(input, 0)
+        self.assertEqual(result, ({'shortint': 1234}, 16))

     def test_encode_table(self):
         result = []
InvalidFieldTypeException when receiving messages with int values in headers dict

Pika throws an InvalidFieldTypeException on messages produced by the haigha Python package when there are int values in the headers dictionary.

Notes: RabbitMQ can handle the message generated by _haigha_ (see attached screenshot), and pika can process similar messages that are published by pika itself.

An example of a failing application_headers:
```
{'compressed': False, 'sequence': 5}
```

Stack trace:
```
File " \venv\lib\site-packages\pika\adapters\blocking_connection.py", line 1688, in _basic_consume_impl
  self._flush_output(ok_result.is_ready)
File " \venv\lib\site-packages\pika\adapters\blocking_connection.py", line 1327, in _flush_output
  self._connection._flush_output(lambda: self.is_closed, *waiters)
File " \venv\lib\site-packages\pika\adapters\blocking_connection.py", line 523, in _flush_output
  raise self._closed_result.value.error
pika.exceptions.StreamLostError: Stream connection lost: InvalidFieldTypeException: Unsupported field kind b''
```

![pika + haigha messages](https://user-images.githubusercontent.com/47942606/56371619-18635680-6206-11e9-804a-fcf1a2e0d1de.PNG)

Attachments:
- haigha_publisher.py: publishes messages that cause the error in pika
- receiver_2.py: pika receiver code
- publisher.py: pika publish code - receiver handles it
- pika + haigha messages.PNG: a screenshot of the messages sent by haigha and pika

[examples.zip](https://github.com/pika/pika/files/3095058/examples.zip)

The following application_header schemas were received successfully by pika:
```
{'compressed': False, },
{'compressed': False, 'sequence': '5'},
{'compressed': False, 'sequence': None},
{'compressed': False, 'encoding': 'json'},
{'compressed': False, 'encoding': 'json', 'sequence': ''},
{'compressed': False, 'encoding': 'json', 'sequence': '5'}
```
0.0
724d8905b4d2527063f793a535521a45ba745d36
[ "tests/unit/data_tests.py::DataTests::test_decode_shortint" ]
[ "tests/unit/data_tests.py::DataTests::test_decode_bytes", "tests/unit/data_tests.py::DataTests::test_decode_raises", "tests/unit/data_tests.py::DataTests::test_decode_table", "tests/unit/data_tests.py::DataTests::test_decode_table_bytes", "tests/unit/data_tests.py::DataTests::test_encode_raises", "tests/unit/data_tests.py::DataTests::test_encode_table", "tests/unit/data_tests.py::DataTests::test_encode_table_bytes" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-07-12 23:37:22+00:00
bsd-3-clause
4,549
pika__pika-1386
diff --git a/pika/connection.py b/pika/connection.py
index b1278f3..c2f169f 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -903,9 +903,9 @@ class URLParameters(Parameters):
         #
         # SSLContext.load_verify_locations(cafile=None, capath=None, cadata=None)
         try:
-            opt_protocol = ssl.PROTOCOL_TLS
+            opt_protocol = ssl.PROTOCOL_TLS_CLIENT
         except AttributeError:
-            opt_protocol = ssl.PROTOCOL_TLSv1
+            opt_protocol = ssl.PROTOCOL_TLSv1_2

         if 'protocol' in opts:
             opt_protocol = opts['protocol']
pika/pika
8598474923fd7809eeeaf88253da3c9a27cbf09a
diff --git a/tests/unit/connection_parameters_tests.py b/tests/unit/connection_parameters_tests.py
index 75d8a8a..ff8dc04 100644
--- a/tests/unit/connection_parameters_tests.py
+++ b/tests/unit/connection_parameters_tests.py
@@ -12,18 +12,16 @@ import pika
 from pika.compat import urlencode, url_quote, dict_iteritems
 from pika import channel, connection, credentials, spec

-
 # disable missing-docstring
 # pylint: disable=C0111

-
 # disable invalid-name
 # pylint: disable=C0103

-
 # Unordered sequence of connection.Parameters's property getters
 _ALL_PUBLIC_PARAMETERS_PROPERTIES = tuple(
-    attr for attr in vars(connection.Parameters) if not attr.startswith('_')
-    and issubclass(type(getattr(connection.Parameters, attr)), property))
+    attr for attr in vars(connection.Parameters) if not attr.startswith('_') and
+    issubclass(type(getattr(connection.Parameters, attr)), property))


 class ChildParameters(connection.Parameters):
@@ -642,6 +640,20 @@ class URLParametersTests(ParametersTestsBase):
             params.port = params.DEFAULT_PORT
         self.assert_default_parameter_values(params)

+    def test_ssl_default_protocol_version_fallback(self):
+        """
+        This test does not set protocol_version option in ssl_options. Instead,
+        it relies on connection.URLParameters class to setup the default, and
+        then it asserts the protocol is what we expected (auto or TLSv1_2)
+        """
+        params = connection.URLParameters(
+            'amqps://foo.bar/some-vhost?ssl_options=%7B%27ca_certs%27%3A%27testdata%2Fcerts%2Fca_certificate.pem%27%7D'
+        )
+        self.assertTrue(
+            params.ssl_options.context.protocol == ssl.PROTOCOL_TLS_CLIENT or
+            params.ssl_options.context.protocol == ssl.PROTOCOL_TLSv1_2,
+            msg='Expected to fallback to a secure protocol')
+
     def test_no_url_scheme_defaults_to_plaintext(self):
         params = connection.URLParameters('//')
         self.assertIsNone(params.ssl_options)
Update fallback SSL/TLS protocol version In the connection module, the method `URLParameters._set_url_ssl_options()` tries to set the secure protocol version to `ssl.PROTOCOL_TLS` and falls back to `ssl.PROTOCOL_TLSv1` if the first is not available (since it was introduced in 3.6 and this client supports 3.4+). https://github.com/pika/pika/blob/c34c411874f049f2048aa5133ea20f87c95fa223/pika/connection.py#L905-L912 This has the following issues: - The usage of `ssl.PROTOCOL_TLS` is [deprecated in 3.10](https://docs.python.org/3/library/ssl.html#ssl.PROTOCOL_TLS) in favour of `ssl.PROTOCOL_TLS_CLIENT` - The fallback sets an insecure protocol I propose the following changes: 1. Try to set [ssl.PROTOCOL_TLS_CLIENT](https://docs.python.org/3/library/ssl.html#ssl.PROTOCOL_TLS_CLIENT) 2. Fall back to [ssl.PROTOCOL_TLSv1_2](https://docs.python.org/3/library/ssl.html#ssl.PROTOCOL_TLSv1_2) These changes would address the usage of the deprecated `ssl.PROTOCOL_TLS` and would fall back to a secure protocol by default. The constant `ssl.PROTOCOL_TLSv1_2` was introduced in Python 3.4, so it is safe to assume this constant will be there for all supported Python versions.
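A sketch of the proposed fallback, mirroring the shape of the code in `_set_url_ssl_options()` (only the two constants change relative to the current code):

```
import ssl

try:
    # Preferred: PROTOCOL_TLS_CLIENT (Python 3.6+) enables hostname checking
    # and certificate verification by default.
    opt_protocol = ssl.PROTOCOL_TLS_CLIENT
except AttributeError:
    # TLSv1_2 exists since Python 3.4, so even the fallback stays on a
    # secure protocol version instead of the old TLSv1.
    opt_protocol = ssl.PROTOCOL_TLSv1_2

context = ssl.SSLContext(opt_protocol)
```

With `PROTOCOL_TLS_CLIENT`, secure defaults come for free, which is exactly the behaviour the issue is asking for.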
0.0
8598474923fd7809eeeaf88253da3c9a27cbf09a
[ "tests/unit/connection_parameters_tests.py::URLParametersTests::test_ssl_default_protocol_version_fallback" ]
[ "tests/unit/connection_parameters_tests.py::ParametersTests::test_blocked_connection_timeout", "tests/unit/connection_parameters_tests.py::ParametersTests::test_channel_max", "tests/unit/connection_parameters_tests.py::ParametersTests::test_connection_attempts", "tests/unit/connection_parameters_tests.py::ParametersTests::test_credentials", "tests/unit/connection_parameters_tests.py::ParametersTests::test_default_property_values", "tests/unit/connection_parameters_tests.py::ParametersTests::test_eq", "tests/unit/connection_parameters_tests.py::ParametersTests::test_frame_max", "tests/unit/connection_parameters_tests.py::ParametersTests::test_heartbeat", "tests/unit/connection_parameters_tests.py::ParametersTests::test_host", "tests/unit/connection_parameters_tests.py::ParametersTests::test_locale", "tests/unit/connection_parameters_tests.py::ParametersTests::test_ne", "tests/unit/connection_parameters_tests.py::ParametersTests::test_port", "tests/unit/connection_parameters_tests.py::ParametersTests::test_retry_delay", "tests/unit/connection_parameters_tests.py::ParametersTests::test_socket_timeout", "tests/unit/connection_parameters_tests.py::ParametersTests::test_ssl_options", "tests/unit/connection_parameters_tests.py::ParametersTests::test_tcp_options", "tests/unit/connection_parameters_tests.py::ParametersTests::test_virtual_host", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_bad_type_connection_parameters", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_callable_heartbeat", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_default_property_values", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_exlicit_none_socket_timeout", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_exlicit_none_stack_timeout", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_non_ssl_with_default_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_non_ssl_with_explict_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_ssl_with_default_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_explicit_ssl_with_explict_port", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_good_connection_parameters", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accept_plain_string_locale", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accept_unicode_locale", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accepts_plain_string_virtualhost", "tests/unit/connection_parameters_tests.py::ConnectionParametersTests::test_parameters_accepts_unicode_string_virtualhost", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_accepts_blank_username_and_password", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_accepts_plain_string", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_accepts_unicode_string", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_default_property_values", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_good_parameters", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_no_ssl", 
"tests/unit/connection_parameters_tests.py::URLParametersTests::test_no_url_scheme_defaults_to_plaintext", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_ssl", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_url_decodes_username_and_password", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_port_if_not_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_username_and_password_if_not_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_if_not_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_if_only_parameters_provided", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_if_only_slash_is_specified", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_downcase", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_downcase_ending_with_slash", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_upcase", "tests/unit/connection_parameters_tests.py::URLParametersTests::test_uses_default_virtual_host_via_encoded_slash_upcase_ending_with_slash" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2022-08-22 16:30:09+00:00
bsd-3-clause
4,550
pimutils__khal-1074
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2c26869..7ba8e17 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,6 +16,8 @@ not released * CHANGE ikhal: tab (and shift tab) jump from the events back to the calendar * NEW Add symbol for events with at least one alarm * FIX URL can now be set/updated from ikhal +* FIX Imported events without an end or duration will now last one day if + `DTSTART` is a date (as per RFC) or one hour if it is a datetime. 0.10.3 ====== diff --git a/khal/icalendar.py b/khal/icalendar.py index a35fd2f..22eee2e 100644 --- a/khal/icalendar.py +++ b/khal/icalendar.py @@ -413,8 +413,9 @@ def sanitize_timerange(dtstart, dtend, duration=None): if dtend is None and duration is None: if isinstance(dtstart, dt.datetime): - dtstart = dtstart.date() - dtend = dtstart + dt.timedelta(days=1) + dtend = dtstart + dt.timedelta(hours=1) + else: + dtend = dtstart + dt.timedelta(days=1) elif dtend is not None: if dtend < dtstart: raise ValueError('The event\'s end time (DTEND) is older than ' @@ -424,7 +425,10 @@ def sanitize_timerange(dtstart, dtend, duration=None): "Event start time and end time are the same. " "Assuming the event's duration is one hour." ) - dtend += dt.timedelta(hours=1) + if isinstance(dtstart, dt.datetime): + dtend += dt.timedelta(hours=1) + else: + dtend += dt.timedelta(days=1) return dtstart, dtend
pimutils/khal
4493273c5e6afd2ca95b756105e8c747e176a642
diff --git a/tests/backend_test.py b/tests/backend_test.py index f966464..07f5b94 100644 --- a/tests/backend_test.py +++ b/tests/backend_test.py @@ -564,11 +564,13 @@ def test_no_dtend(): """test support for events with no dtend""" db = backend.SQLiteDb([calname], ':memory:', locale=LOCALE_BERLIN) db.update(_get_text('event_dt_no_end'), href='event_dt_no_end', calendar=calname) - events = db.get_floating( - dt.datetime(2016, 1, 16, 0, 0), dt.datetime(2016, 1, 17, 0, 0)) + events = db.get_localized( + BERLIN.localize(dt.datetime(2016, 1, 16, 0, 0)), + BERLIN.localize(dt.datetime(2016, 1, 17, 0, 0)), + ) event = list(events)[0] - assert event[2] == dt.date(2016, 1, 16) - assert event[3] == dt.date(2016, 1, 17) + assert event[2] == BERLIN.localize(dt.datetime(2016, 1, 16, 8, 0)) + assert event[3] == BERLIN.localize(dt.datetime(2016, 1, 16, 9, 0)) event_rdate_period = """BEGIN:VEVENT diff --git a/tests/khalendar_utils_test.py b/tests/khalendar_utils_test.py index cbe35d7..ed81e88 100644 --- a/tests/khalendar_utils_test.py +++ b/tests/khalendar_utils_test.py @@ -791,8 +791,8 @@ class TestSanitize: def test_noend_datetime(self): vevent = _get_vevent(noend_datetime) vevent = icalendar_helpers.sanitize(vevent, berlin, '', '') - assert vevent['DTSTART'].dt == dt.date(2014, 8, 29) - assert vevent['DTEND'].dt == dt.date(2014, 8, 30) + assert vevent['DTSTART'].dt == BERLIN.localize(dt.datetime(2014, 8, 29, 8)) + assert vevent['DTEND'].dt == BERLIN.localize(dt.datetime(2014, 8, 29, 9)) def test_duration(self): vevent = _get_vevent_file('event_dtr_exdatez')
How to display the hour in ikhal (+ is there a discussion forum for basic questions) Hello again. I'm sorry to bother you with a rather basic question. I want to display events with their start time in `ikhal`. I could do it with the command: ``` khal list --format "{title} {start-time-full}" ``` I tried to modify the conf file in several ways without success: ``` [calendars] [[private]] path = /path/to/calendar/ type = calendar color = dark green [locale] timeformat = %H:%M dateformat = %d/%m/%Y longdateformat = %d/%m/%Y datetimeformat = %d/%m/%Y %H:%M longdatetimeformat = %d/%m/%Y %H:%M [default] default_calendar = private ``` I tried to read the docs [here](https://khal.readthedocs.io/en/latest/usage.html#) and [there](https://khal.readthedocs.io/en/latest/configure.html) but unfortunately could not find out how to do this. More generally, is there a discussion forum for basic questions such as this one? Many thanks in advance
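For reference, the per-event line templates live in the `[view]` section rather than `[locale]`; a sketch of the kind of setting being asked about, using the `agenda_event_format` option and the documented placeholders (the exact template below is illustrative, not a recommendation):

```
[view]
agenda_event_format = {calendar-color}{start-end-time-style} {title}{repeat-symbol}{reset}
```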
0.0
4493273c5e6afd2ca95b756105e8c747e176a642
[ "tests/backend_test.py::test_no_dtend", "tests/khalendar_utils_test.py::TestSanitize::test_noend_datetime" ]
[ "tests/backend_test.py::test_new_db_version", "tests/backend_test.py::test_event_rrule_recurrence_id", "tests/backend_test.py::test_event_rrule_recurrence_id_invalid_tzid", "tests/backend_test.py::test_event_rrule_recurrence_id_reverse", "tests/backend_test.py::test_event_rrule_recurrence_id_update_with_exclude", "tests/backend_test.py::test_event_recuid_no_master", "tests/backend_test.py::test_event_recuid_rrule_no_master", "tests/backend_test.py::test_no_valid_timezone", "tests/backend_test.py::test_event_delete", "tests/backend_test.py::test_this_and_prior", "tests/backend_test.py::test_event_rrule_this_and_future", "tests/backend_test.py::test_event_rrule_this_and_future_multi_day_shift", "tests/backend_test.py::test_event_rrule_this_and_future_allday", "tests/backend_test.py::test_event_rrule_this_and_future_allday_prior", "tests/backend_test.py::test_event_rrule_multi_this_and_future_allday", "tests/backend_test.py::test_calc_shift_deltas", "tests/backend_test.py::test_two_calendars_same_uid", "tests/backend_test.py::test_update_one_should_not_affect_others", "tests/backend_test.py::test_check_support", "tests/backend_test.py::test_check_support_rdate_no_values", "tests/backend_test.py::test_birthdays", "tests/backend_test.py::test_birthdays_update", "tests/backend_test.py::test_birthdays_no_fn", "tests/backend_test.py::test_birthday_does_not_parse", "tests/backend_test.py::test_vcard_two_birthdays", "tests/backend_test.py::test_anniversary", "tests/backend_test.py::test_abdate", "tests/backend_test.py::test_abdate_nolabel", "tests/backend_test.py::test_birthday_v3", "tests/khalendar_utils_test.py::TestExpand::test_expand_dt", "tests/khalendar_utils_test.py::TestExpand::test_expand_dtb", "tests/khalendar_utils_test.py::TestExpand::test_expand_dttz", "tests/khalendar_utils_test.py::TestExpand::test_expand_dtf", "tests/khalendar_utils_test.py::TestExpand::test_expand_d", "tests/khalendar_utils_test.py::TestExpand::test_expand_dtz", "tests/khalendar_utils_test.py::TestExpand::test_expand_dtzb", "tests/khalendar_utils_test.py::TestExpand::test_expand_invalid_exdate", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_dt", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_dtb", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_dttz", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_dtf", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_d", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_dtr_exdatez", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_rrule_exdate_z", "tests/khalendar_utils_test.py::TestExpandNoRR::test_expand_rrule_notz_until_z", "tests/khalendar_utils_test.py::TestSpecial::test_count", "tests/khalendar_utils_test.py::TestSpecial::test_until_notz", "tests/khalendar_utils_test.py::TestSpecial::test_until_d_notz", "tests/khalendar_utils_test.py::TestSpecial::test_latest_bug", "tests/khalendar_utils_test.py::TestSpecial::test_recurrence_id_with_timezone", "tests/khalendar_utils_test.py::TestSpecial::test_event_exdate_dt", "tests/khalendar_utils_test.py::TestSpecial::test_event_exdates_dt", "tests/khalendar_utils_test.py::TestSpecial::test_event_exdatesl_dt", "tests/khalendar_utils_test.py::TestSpecial::test_event_exdates_remove", "tests/khalendar_utils_test.py::TestSpecial::test_event_dt_rrule_invalid_until", "tests/khalendar_utils_test.py::TestSpecial::test_event_dt_rrule_invalid_until2", "tests/khalendar_utils_test.py::TestSpecial::test_event_dt_rrule_until_before_start", 
"tests/khalendar_utils_test.py::TestSpecial::test_event_invalid_rrule", "tests/khalendar_utils_test.py::TestRDate::test_simple_rdate", "tests/khalendar_utils_test.py::TestRDate::test_rrule_and_rdate", "tests/khalendar_utils_test.py::TestRDate::test_rrule_past", "tests/khalendar_utils_test.py::TestRDate::test_rdate_date", "tests/khalendar_utils_test.py::TestSanitize::test_noend_date", "tests/khalendar_utils_test.py::TestSanitize::test_duration", "tests/khalendar_utils_test.py::TestSanitize::test_instant", "tests/khalendar_utils_test.py::TestIsAware::test_naive", "tests/khalendar_utils_test.py::TestIsAware::test_berlin", "tests/khalendar_utils_test.py::TestIsAware::test_bogota", "tests/khalendar_utils_test.py::TestIsAware::test_utc" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2021-07-24 22:48:55+00:00
mit
4,551
pimutils__khal-1145
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c03753d..843d9cf 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -13,6 +13,7 @@ may want to subscribe to `GitHub's tag feed not released * FIX support for tzlocal >= 4.0 +* FIX ability to show event's calendar in ikhal * NEW Add widget to interactive event editor that allows adding attendees as comma separated list of email addresses * NEW Add support for Python 3.10 diff --git a/khal/khalendar/event.py b/khal/khalendar/event.py index c6fc795..1801aab 100644 --- a/khal/khalendar/event.py +++ b/khal/khalendar/event.py @@ -698,6 +698,7 @@ class Event: attributes["calendar"] = cal.get("displayname", self.calendar) else: attributes["calendar-color"] = attributes["calendar"] = '' + attributes["calendar"] = self.calendar if colors: attributes['reset'] = style('', reset=True)
pimutils/khal
777e5b494902daf3fb9eaf95cc0ee77a8483d4e9
diff --git a/tests/event_test.py b/tests/event_test.py index 6b52d8a..e3014dd 100644 --- a/tests/event_test.py +++ b/tests/event_test.py @@ -16,6 +16,8 @@ EVENT_KWARGS = {'calendar': 'foobar', 'locale': LOCALE_BERLIN} LIST_FORMAT = '{calendar-color}{cancelled}{start-end-time-style} {title}{repeat-symbol}' SEARCH_FORMAT = '{calendar-color}{cancelled}{start-long}{to-style}' + \ '{end-necessary-long} {title}{repeat-symbol}' +FORMAT_CALENDAR = ('{calendar-color}{cancelled}{start-end-time-style} ({calendar}) ' + '{title} [{location}]{repeat-symbol}') def test_no_initialization(): @@ -47,6 +49,19 @@ def test_raw_dt(): assert event.organizer == '' +def test_calendar_in_format(): + """test if the calendar is included in event.format() if specified in the FORMAT + + see #1121 + """ + event_dt = _get_text('event_dt_simple') + start = BERLIN.localize(dt.datetime(2014, 4, 9, 9, 30)) + end = BERLIN.localize(dt.datetime(2014, 4, 9, 10, 30)) + event = Event.fromString(event_dt, start=start, end=end, **EVENT_KWARGS) + assert event.format(FORMAT_CALENDAR, dt.date(2014, 4, 9)) == \ + '09:30-10:30 (foobar) An Event []\x1b[0m' + + def test_update_simple(): event = Event.fromString(_get_text('event_dt_simple'), **EVENT_KWARGS) event_updated = Event.fromString(_get_text('event_dt_simple_updated'), **EVENT_KWARGS)
`ikhal` not respecting `{calendar}` in `agenda_event_format` First of all: Thank you for making and maintaining `khal`. I use it every day! According to [the docs](https://lostpackets.de/khal/configure.html#the-view-section), setting ``` agenda_event_format = {calendar-color}{cancelled}{start-end-time-style} ({calendar}) {title} [{location}]{repeat-symbol}{reset} ``` should be respected by both `khal list` and `ikhal`. However, in `ikhal`, I only get output such as this: ``` <calendary stuff> Today (2022-04-08) … 8:00-15:00 () Open issue for ikhal [living room] </calendary stuff> ``` Note the empty parentheses in the “Open issue for ikhal” event. I would've expected the name of the calendar there, since `agenda_event_format` specifies `({calendar})` (which *does* work as expected in `khal list`).
0.0
777e5b494902daf3fb9eaf95cc0ee77a8483d4e9
[ "tests/event_test.py::test_calendar_in_format" ]
[ "tests/event_test.py::test_event_organizer", "tests/event_test.py::test_cancelled_instance", "tests/event_test.py::test_remove_instance_from_rrule", "tests/event_test.py::test_raw_d", "tests/event_test.py::test_update_sequence", "tests/event_test.py::test_format_colors", "tests/event_test.py::test_get_url", "tests/event_test.py::test_no_end", "tests/event_test.py::test_event_dt_duration", "tests/event_test.py::test_remove_existing_location_if_set_to_empty", "tests/event_test.py::test_remove_instance_from_rdate", "tests/event_test.py::test_event_d_long", "tests/event_test.py::test_event_alarm", "tests/event_test.py::test_do_not_save_empty_description", "tests/event_test.py::test_recur", "tests/event_test.py::test_no_initialization", "tests/event_test.py::test_invalid_format_string", "tests/event_test.py::test_sort_event_start", "tests/event_test.py::test_event_rd", "tests/event_test.py::test_dtend_equals_dtstart", "tests/event_test.py::test_multi_uid", "tests/event_test.py::test_event_dt_long", "tests/event_test.py::test_do_not_save_empty_url", "tests/event_test.py::test_format_24", "tests/event_test.py::test_sort_event_summary", "tests/event_test.py::test_zulu_events", "tests/event_test.py::test_update_event_d", "tests/event_test.py::test_sort_event_end", "tests/event_test.py::test_event_dt_floating", "tests/event_test.py::test_remove_existing_url_if_set_to_empty", "tests/event_test.py::test_update_simple", "tests/event_test.py::test_update_remove_categories", "tests/event_test.py::test_event_d_rr", "tests/event_test.py::test_update_event_duration", "tests/event_test.py::test_event_raw_UTC", "tests/event_test.py::test_event_d_two_days", "tests/event_test.py::test_remove_instance_from_two_rdate", "tests/event_test.py::test_event_dt_tz_missing", "tests/event_test.py::test_do_not_save_empty_location", "tests/event_test.py::test_type_inference", "tests/event_test.py::test_remove_existing_description_if_set_to_empty", "tests/event_test.py::test_add_url", "tests/event_test.py::test_event_attendees", "tests/event_test.py::test_invalid_keyword_argument", "tests/event_test.py::test_sort_date_vs_datetime", "tests/event_test.py::test_duplicate_event", "tests/event_test.py::test_remove_instance_from_recuid", "tests/event_test.py::test_event_dt_rr" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-06-24 12:20:53+00:00
mit
4,552
pimutils__khal-1166
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7021c0c..8b8d2ef 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -20,6 +20,7 @@ not released anymore * NEW the `configure` command can now set up vdirsyncer * NEW better error message for misuses of `at` and `list` +* NEW `discover` collection type now supports `**` (arbitrary depths) 0.10.5 ====== diff --git a/khal/settings/khal.spec b/khal/settings/khal.spec index 85c93df..be60a07 100644 --- a/khal/settings/khal.spec +++ b/khal/settings/khal.spec @@ -10,6 +10,7 @@ # The path to an existing directory where this calendar is saved as a *vdir*. # The directory is searched for events or birthdays (see ``type``). The path # also accepts glob expansion via `*` or `?` when type is set to discover. +# `**` means arbitrary depths of directories. # This allows for paths such as `~/accounts/*/calendars/*`, where the # calendars directory contains vdir directories. In addition, `~/calendars/*` # and `~/calendars/default` are valid paths if there exists a vdir in the diff --git a/khal/settings/utils.py b/khal/settings/utils.py index bd1281a..f01c26f 100644 --- a/khal/settings/utils.py +++ b/khal/settings/utils.py @@ -24,6 +24,7 @@ import datetime as dt import glob import logging import os +import pathlib from os.path import expanduser, expandvars, join from typing import Callable, Iterable, Literal, Optional, Union @@ -189,12 +190,25 @@ def get_unique_name(path: str, names: Iterable[str]) -> str: return name -def get_all_vdirs(path: str) -> Iterable[str]: +def get_all_vdirs(expand_path: str) -> Iterable[str]: """returns a list of paths, expanded using glob """ - # FIXME currently returns a list of all files in path - items = glob.glob(path) - return items + # FIXME currently returns a list of all directories in path + # we add an additional / at the end to make sure we are only getting + # directories + items = glob.glob(f'{expand_path}/', recursive=True) + paths = [pathlib.Path(item) for item in sorted(items, key=len, reverse=True)] + leaves = set() + parents = set() + for path in paths: + if path in parents: + # we have already seen the current directory as the parent of + # another directory, so this directory can't be a vdir + continue + parents.add(path.parent) + leaves.add(path) + # sort to make sure that auto generated names are always identical + return sorted(os.fspath(path) for path in leaves) def get_vdir_type(_: str) -> str:
pimutils/khal
0e6ed004102c12a4324c0d2cedd3b2309bdd5c7b
diff --git a/tests/settings_test.py b/tests/settings_test.py index 5ca49cc..1d937bd 100644 --- a/tests/settings_test.py +++ b/tests/settings_test.py @@ -172,6 +172,7 @@ def metavdirs(tmpdir): '/cal4/dircolor/', '/cal4/cfgcolor_again/', '/cal4/cfgcolor_once_more/', + '/singlecollection/', ] for one in dirstructure: os.makedirs(tmpdir + one) @@ -198,25 +199,30 @@ def test_broken_color(metavdirs): def test_discover(metavdirs): - path = metavdirs - vdirs = {vdir[len(path):] for vdir in get_all_vdirs(path + '/*/*')} - assert vdirs == { + test_vdirs = { '/cal1/public', '/cal1/private', '/cal2/public', '/cal3/home', '/cal3/public', '/cal3/work', - '/cal4/cfgcolor', '/cal4/dircolor', '/cal4/cfgcolor_again', '/cal4/cfgcolor_once_more' + '/cal4/cfgcolor', '/cal4/dircolor', '/cal4/cfgcolor_again', + '/cal4/cfgcolor_once_more', + '/singlecollection', } + path = metavdirs + assert test_vdirs == {vdir[len(path):] for vdir in get_all_vdirs(path + '/**/*/')} + assert test_vdirs == {vdir[len(path):] for vdir in get_all_vdirs(path + '/**/')} + assert test_vdirs == {vdir[len(path):] for vdir in get_all_vdirs(path + '/**/*')} def test_get_unique_name(metavdirs): path = metavdirs - vdirs = list(get_all_vdirs(path + '/*/*')) + vdirs = list(get_all_vdirs(path + '/**/')) names = [] for vdir in sorted(vdirs): names.append(get_unique_name(vdir, names)) - assert names == [ + assert sorted(names) == sorted([ 'my private calendar', 'my calendar', 'public', 'home', 'public1', 'work', 'cfgcolor', 'cfgcolor_again', 'cfgcolor_once_more', 'dircolor', - ] + 'singlecollection', + ]) def test_config_checks(metavdirs): @@ -260,14 +266,14 @@ def test_config_checks(metavdirs): }, 'public1': { 'color': None, - 'path': '/cal2/public', + 'path': '/cal3/public', 'readonly': False, 'type': 'calendar', 'priority': 10, }, 'public': { 'color': None, - 'path': '/cal3/public', + 'path': '/cal2/public', 'readonly': False, 'type': 'calendar', 'priority': 10,
calendar discovery glob doesn't handle recursion When using a glob'd directory path, I'm encountering an exception when getting it to parse nested vdirs correctly. This is the layout of the directory that `vdirsyncer` currently syncs all my calendars to: ``` /home/demi/Calendars/fastmail/ ├── Cocoaheads │   └── Boston # vdir │ ├── Games │   ├── General │ │ └── Events # vdir │   ├── Splatoon2 │ │ ├── Salmon-Run # vdir │ │ └── Festivals # vdir │   └── The100 # vdir │ ├── Holidays │   ├── Japan # vdir │   ├── UK # vdir │   └── USA # vdir │ ├── Medical │   └── Reminders # vdir │ ├── Personal │   ├── Deliveries # vdir │   └── Schedule # vdir │ └── Work └── Events # vdir ``` The problem occurs when I try to specify a search path for one of these top-level directories (`Cocoaheads`/`Games`/`Holidays`/`Medical`/`Personal`/`Work`) where the vdirs are not all nested at the same level. In this case the top-level directory `Games` has vdirs at depths of 1 and 2, so when I specify a glob to accommodate that: ``` [[Games]] path = ~/Calendars/fastmail/Games/*/* type = discover readonly = True ``` then `khal` treats everything at a depth of 2 as a vdir, regardless of whether it is a file or a directory. Thus at startup I get an exception because the `ics` files inside the depth-1 vdir are parsed as if they were vdirs. I guess this isn't technically a bug in that regard, because I'm telling `khal` that is where the vdirs are, but I don't know how to write the path so that `khal` will search the given path for vdirs and then classify the contents of the found vdirs under one calendar name in the application. If the special glob pattern `**` could be used to do this (or whatever else would denote that kind of behavior), that would be awesome, but thus far I cannot figure out how to do that. --- Version: `khal v0.10.1`
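The eventual fix (the patch at the top of this record) answers this by globbing with `recursive=True` and keeping only leaf directories, so a vdir's parent is never mistaken for a vdir itself. A standalone sketch of that idea, run against the hypothetical `Games` layout from the report:

```
import glob
import os
import pathlib

def find_vdirs(pattern):
    # The trailing slash restricts the glob to directories; recursive=True
    # makes '**' match at any depth.
    paths = [pathlib.Path(p) for p in
             sorted(glob.glob(f'{pattern}/', recursive=True), key=len, reverse=True)]
    leaves, parents = set(), set()
    for path in paths:  # longest paths first, so children are seen before parents
        if path in parents:
            continue  # already seen as the parent of another directory: not a vdir
        parents.add(path.parent)
        leaves.add(path)
    return sorted(os.fspath(p) for p in leaves)

# Mixed-depth vdirs under Games/ are all found; Games/ and Splatoon2/ are not.
print(find_vdirs(os.path.expanduser('~/Calendars/fastmail/Games/**')))
```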
0.0
0e6ed004102c12a4324c0d2cedd3b2309bdd5c7b
[ "tests/settings_test.py::test_discover", "tests/settings_test.py::test_get_unique_name" ]
[ "tests/settings_test.py::TestSettings::test_simple_config", "tests/settings_test.py::TestSettings::test_nocalendars", "tests/settings_test.py::TestSettings::test_one_level_calendar", "tests/settings_test.py::TestSettings::test_small", "tests/settings_test.py::TestSettings::test_old_config", "tests/settings_test.py::TestSettings::test_extra_sections", "tests/settings_test.py::TestSettings::test_default_calendar_readonly", "tests/settings_test.py::test_broken_color", "tests/settings_test.py::test_config_checks", "tests/settings_test.py::test_is_color" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-07-11 20:02:46+00:00
mit
4,553
pimutils__khal-1295
diff --git a/.gitignore b/.gitignore index 1920041..d022b15 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ env/ venv/ .hypothesis/ .python-version +.dmypy.json diff --git a/AUTHORS.txt b/AUTHORS.txt index a312f47..1f5297d 100644 --- a/AUTHORS.txt +++ b/AUTHORS.txt @@ -52,3 +52,4 @@ Jason Cox - me [at] jasoncarloscox [dot] com - https://jasoncarloscox.com Michael Tretter - michael.tretter [at] posteo [dot] net Raúl Medina - raulmgcontact [at] gmail (dot] com Matthew Rademaker - matthew.rademaker [at] gmail [dot] com +Valentin Iovene - val [at] too [dot] gy diff --git a/khal.conf.sample b/khal.conf.sample index 243bb55..494a2da 100644 --- a/khal.conf.sample +++ b/khal.conf.sample @@ -30,3 +30,4 @@ monthdisplay = firstday default_calendar = home timedelta = 2d # the default timedelta that list uses highlight_event_days = True # the default is False +enable_mouse = True # mouse is enabled by default in interactive mode diff --git a/khal/cli.py b/khal/cli.py index 8b920f5..538ff9d 100644 --- a/khal/cli.py +++ b/khal/cli.py @@ -93,6 +93,16 @@ def multi_calendar_option(f): return d(a(f)) +def mouse_option(f): + o = click.option( + '--mouse/--no-mouse', + is_flag=True, + default=None, + help='Disable mouse in interactive UI' + ) + return o(f) + + def _select_one_calendar_callback(ctx, option, calendar): if isinstance(calendar, tuple): if len(calendar) > 1: @@ -480,9 +490,12 @@ def _get_cli(): @cli.command() @multi_calendar_option + @mouse_option @click.pass_context - def interactive(ctx, include_calendar, exclude_calendar): + def interactive(ctx, include_calendar, exclude_calendar, mouse): '''Interactive UI. Also launchable via `ikhal`.''' + if mouse is not None: + ctx.obj['conf']['default']['enable_mouse'] = mouse controllers.interactive( build_collection( ctx.obj['conf'], @@ -494,10 +507,13 @@ def _get_cli(): @click.command() @global_options @multi_calendar_option + @mouse_option @click.pass_context - def interactive_cli(ctx, config, include_calendar, exclude_calendar): + def interactive_cli(ctx, config, include_calendar, exclude_calendar, mouse): '''Interactive UI. Also launchable via `khal interactive`.''' prepare_context(ctx, config) + if mouse is not None: + ctx.obj['conf']['default']['enable_mouse'] = mouse controllers.interactive( build_collection( ctx.obj['conf'], diff --git a/khal/settings/khal.spec b/khal/settings/khal.spec index 789b103..fb9e50f 100644 --- a/khal/settings/khal.spec +++ b/khal/settings/khal.spec @@ -216,6 +216,10 @@ default_event_duration = timedelta(default='1d') # Define the default duration for an event ('khal new' only) default_dayevent_duration = timedelta(default='1h') +# Whether the mouse should be enabled in interactive mode ('khal interactive' and +# 'ikhal' only) +enable_mouse = boolean(default=True) + # The view section contains configuration options that effect the visual appearance # when using khal and ikhal. diff --git a/khal/ui/__init__.py b/khal/ui/__init__.py index b3f1118..1a93f78 100644 --- a/khal/ui/__init__.py +++ b/khal/ui/__init__.py @@ -1345,7 +1345,12 @@ def start_pane(pane, callback, program_info='', quit_keys=None): palette = _add_calendar_colors( getattr(colors, pane._conf['view']['theme']), pane.collection) loop = urwid.MainLoop( - frame, palette, unhandled_input=frame.on_key_press, pop_ups=True) + widget=frame, + palette=palette, + unhandled_input=frame.on_key_press, + pop_ups=True, + handle_mouse=pane._conf['default']['enable_mouse'], + ) frame.loop = loop def redraw_today(loop, pane, meta=None):
pimutils/khal
0395b57d2fd865b96d062640783b88d691e0db29
diff --git a/tests/settings_test.py b/tests/settings_test.py index 22dec97..586e696 100644 --- a/tests/settings_test.py +++ b/tests/settings_test.py @@ -55,6 +55,7 @@ class TestSettings: 'default_event_duration': dt.timedelta(days=1), 'default_dayevent_duration': dt.timedelta(hours=1), 'show_all_days': False, + 'enable_mouse': True, } } for key in comp_config: @@ -102,8 +103,8 @@ class TestSettings: 'timedelta': dt.timedelta(days=2), 'default_event_duration': dt.timedelta(days=1), 'default_dayevent_duration': dt.timedelta(hours=1), - - 'show_all_days': False + 'show_all_days': False, + 'enable_mouse': True, } } for key in comp_config:
[feature request] allow disabling mouse input from the `man` page: > interactive > invokes the interactive version of khal, can also be invoked by calling ikhal. While ikhal can be used entirely with the keyboard, some elements respond if clicked on with a mouse (mostly by being selected). I don't use the mouse with any application which can be used entirely with the keyboard. For instance `htop` and `fzf` have an option (a flag) to disable mouse support. ``` fzf --no-mouse htop --no-mouse ```
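The patch above threads the new setting through to urwid, where the actual switch is `MainLoop`'s `handle_mouse` flag. A minimal self-contained sketch of that mechanism (the widget contents are placeholders, and `enable_mouse` stands in for the value read from khal's config or the `--no-mouse` flag):

```
import urwid

enable_mouse = False  # would come from [default] enable_mouse / --no-mouse

fill = urwid.Filler(urwid.Text('press q to quit'), 'top')

def on_key(key):
    if key in ('q', 'Q'):
        raise urwid.ExitMainLoop()

loop = urwid.MainLoop(
    widget=fill,
    unhandled_input=on_key,
    handle_mouse=enable_mouse,  # False: the terminal keeps native mouse selection
)
loop.run()
```

With `handle_mouse=False`, urwid never puts the terminal into mouse-reporting mode, which is what lets keyboard-only users keep normal copy/paste selection.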
0.0
0395b57d2fd865b96d062640783b88d691e0db29
[ "tests/settings_test.py::TestSettings::test_simple_config", "tests/settings_test.py::TestSettings::test_small" ]
[ "tests/settings_test.py::TestSettings::test_nocalendars", "tests/settings_test.py::TestSettings::test_one_level_calendar", "tests/settings_test.py::TestSettings::test_old_config", "tests/settings_test.py::TestSettings::test_extra_sections", "tests/settings_test.py::TestSettings::test_default_calendar_readonly", "tests/settings_test.py::test_broken_color", "tests/settings_test.py::test_discover", "tests/settings_test.py::test_get_unique_name", "tests/settings_test.py::test_config_checks", "tests/settings_test.py::test_is_color" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-09-16 16:46:35+00:00
mit
4,554
pimutils__khal-1305
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2731900..fdf730a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,6 +16,7 @@ not released yet an event * NEW properties of ikhal themes (dark and light) can now be overriden from the config file (via the new [palette] section, check the documenation) +* NEW timedelta strings can now have a leading `+`, e.g. `+1d` 0.11.2 ====== diff --git a/khal/parse_datetime.py b/khal/parse_datetime.py index c3915b2..c060579 100644 --- a/khal/parse_datetime.py +++ b/khal/parse_datetime.py @@ -289,7 +289,7 @@ def guesstimedeltafstr(delta_string: str) -> dt.timedelta: :param delta_string: string encoding time-delta, e.g. '1h 15m' """ - tups = re.split(r'(-?\d+)', delta_string) + tups = re.split(r'(-?\+?\d+)', delta_string) if not re.match(r'^\s*$', tups[0]): raise ValueError(f'Invalid beginning of timedelta string "{delta_string}": "{tups[0]}"') tups = tups[1:] @@ -297,6 +297,8 @@ def guesstimedeltafstr(delta_string: str) -> dt.timedelta: for num, unit in zip(tups[0::2], tups[1::2]): try: + if num[0] == '+': + num = num[1:] numint = int(num) except ValueError: raise DateTimeParseError(
pimutils/khal
25c5c2671908a72f82f4d2add02068374ffb3242
diff --git a/tests/parse_datetime_test.py b/tests/parse_datetime_test.py index e40507a..1dd9cd8 100644 --- a/tests/parse_datetime_test.py +++ b/tests/parse_datetime_test.py @@ -193,6 +193,15 @@ class TestGuessTimedeltafstr: def test_seconds(self): assert dt.timedelta(seconds=10) == guesstimedeltafstr('10s') + def test_single_plus(self): + assert dt.timedelta(minutes=10) == guesstimedeltafstr('+10m') + + def test_seconds_plus(self): + assert dt.timedelta(seconds=10) == guesstimedeltafstr('+10s') + + def test_days_plus(self): + assert dt.timedelta(days=10) == guesstimedeltafstr('+10days') + def test_negative(self): assert dt.timedelta(minutes=-10) == guesstimedeltafstr('-10m') @@ -200,6 +209,14 @@ class TestGuessTimedeltafstr: assert dt.timedelta(days=1, hours=-3, minutes=10) == \ guesstimedeltafstr(' 1d -3H 10min ') + def test_multi_plus(self): + assert dt.timedelta(days=1, hours=3, minutes=10) == \ + guesstimedeltafstr(' 1d +3H 10min ') + + def test_multi_plus_minus(self): + assert dt.timedelta(days=0, hours=21, minutes=10) == \ + guesstimedeltafstr('+1d -3H 10min ') + def test_multi_nospace(self): assert dt.timedelta(days=1, hours=-3, minutes=10) == \ guesstimedeltafstr('1D-3hour10m')
Permissive parsing of DELTA I often enter the DELTA using `khal add` with a leading plus, e.g. because it's how stuff works in TaskWarrior, among others. So: `khal add tomorrow 10:00 +90m Test`, but this generates an event with title "+90m Test". My suggestion would be to swallow a leading `+` in what might be a DELTA value, and try both. Happy to try to submit a patch.
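The merged change (patch above) extends the split pattern to accept a leading `+` and strips the sign before `int()`. A standalone sketch of the parsing loop with the new regex — the unit table here is abbreviated for illustration and is not khal's full mapping:

```
import datetime as dt
import re

UNITS = {'d': 'days', 'day': 'days', 'days': 'days',
         'h': 'hours', 'hour': 'hours',
         'm': 'minutes', 'min': 'minutes',
         's': 'seconds'}

def guess_timedelta(delta_string):
    # '(-?\+?\d+)' keeps signed numbers as their own tokens; [1:] drops
    # whatever precedes the first number.
    tups = re.split(r'(-?\+?\d+)', delta_string)[1:]
    result = dt.timedelta()
    for num, unit in zip(tups[0::2], tups[1::2]):
        if num.startswith('+'):
            num = num[1:]  # khal strips the '+' before converting
        result += dt.timedelta(**{UNITS[unit.strip().lower()]: int(num)})
    return result

print(guess_timedelta('+90m'))           # 1:30:00
print(guess_timedelta('+1d -3H 10min'))  # 21:10:00
```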
0.0
25c5c2671908a72f82f4d2add02068374ffb3242
[ "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_seconds_plus", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_multi_plus", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_multi_plus_minus", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_days_plus", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_single_plus" ]
[ "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_today", "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_short_format_contains_year", "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_time_yesterday", "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_long_not_configured", "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_tomorrow", "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_time_weekday", "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_time_now", "tests/parse_datetime_test.py::TestGuessDatetimefstr::test_time_tomorrow", "tests/parse_datetime_test.py::TestTimeDelta2Str::test_negative", "tests/parse_datetime_test.py::TestTimeDelta2Str::test_single", "tests/parse_datetime_test.py::TestTimeDelta2Str::test_days", "tests/parse_datetime_test.py::TestTimeDelta2Str::test_multi", "tests/parse_datetime_test.py::test_construct_daynames", "tests/parse_datetime_test.py::test_weekdaypstr", "tests/parse_datetime_test.py::test_weekdaypstr_invalid", "tests/parse_datetime_test.py::test_repeat_localized", "tests/parse_datetime_test.py::test_leap_year", "tests/parse_datetime_test.py::test__construct_event_format_us", "tests/parse_datetime_test.py::test_alarm", "tests/parse_datetime_test.py::test_description_and_location_and_categories", "tests/parse_datetime_test.py::test_repeat_floating", "tests/parse_datetime_test.py::test__construct_event_format_de_complexer", "tests/parse_datetime_test.py::test_construct_event_format_de", "tests/parse_datetime_test.py::test_description", "tests/parse_datetime_test.py::TestGuessRangefstr::test_short_format_contains_year", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_and_delta_3d", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_and_delta_1d", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_and_eod", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_dt_and_delta", "tests/parse_datetime_test.py::TestGuessRangefstr::test_today", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_allday_and_delta_datetime", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_and_end_date_time", "tests/parse_datetime_test.py::TestGuessRangefstr::test_tomorrow", "tests/parse_datetime_test.py::TestGuessRangefstr::test_invalid", "tests/parse_datetime_test.py::TestGuessRangefstr::test_week", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_zero_day_delta", "tests/parse_datetime_test.py::TestGuessRangefstr::test_time_tomorrow", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_and_week", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_and_no_end_date", "tests/parse_datetime_test.py::TestGuessRangefstr::test_start_and_end_date", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_negative", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_seconds", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_multi", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_multi_nospace", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_same", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_moregarbage", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_garbage", "tests/parse_datetime_test.py::TestGuessTimedeltafstr::test_single" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-10-27 12:45:14+00:00
mit
4,555
pimutils__khal-494
diff --git a/khal/aux.py b/khal/aux.py index 6d61e1c..45bec4d 100644 --- a/khal/aux.py +++ b/khal/aux.py @@ -340,6 +340,7 @@ def guessrangefstr(daterange, locale, default_timedelta=None, adjust_reasonably= start = datetime.today() - \ timedelta(days=(today_weekday - locale['firstweekday'])) end = start + timedelta(days=7) + return start, end, True else: split = start.split(" ") start, allday = guessdatetimefstr(split, locale) @@ -358,7 +359,6 @@ def guessrangefstr(daterange, locale, default_timedelta=None, adjust_reasonably= start -= timedelta(days=(start.weekday() - locale['firstweekday'])) end = start + timedelta(days=7) else: - try: delta = guesstimedeltafstr(end) end = start + delta
pimutils/khal
6ed1f1b68fe7c1662fef628396d2fbcc0e8b3abe
diff --git a/tests/aux_test.py b/tests/aux_test.py index 9aafcbf..bc43fc8 100644 --- a/tests/aux_test.py +++ b/tests/aux_test.py @@ -183,6 +183,11 @@ class TestGuessRangefstr(object): assert (datetime(2015, 12, 28), datetime(2016, 1, 4), True) == \ guessrangefstr('1.1.2016 week', locale=locale_de, default_timedelta="1d") + @freeze_time('20160216') + def test_week(self): + assert (datetime(2016, 2, 15), datetime(2016, 2, 22), True) == \ + guessrangefstr('week', locale=locale_de, default_timedelta="1d") + def test_invalid(self): with pytest.raises(ValueError): guessrangefstr('3d', locale=locale_de, default_timedelta="1d")
`khal list week` crashes Since https://github.com/pimutils/khal/commit/48020f06560f3ffd0d7845d24b6464ad8f7dd3c0 `khal list week` crashes like this: Traceback (most recent call last): File "/home/untitaker/.local/bin/khal", line 9, in <module> load_entry_point('khal', 'console_scripts', 'khal')() File "/home/untitaker/projects/click/click/core.py", line 716, in __call__ return self.main(*args, **kwargs) File "/home/untitaker/projects/click/click/core.py", line 696, in main rv = self.invoke(ctx) File "/home/untitaker/projects/click/click/core.py", line 1065, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/untitaker/projects/click/click/core.py", line 892, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/untitaker/projects/click/click/core.py", line 534, in invoke return callback(*args, **kwargs) File "/home/untitaker/projects/click/click/decorators.py", line 17, in new_func return f(get_current_context(), *args, **kwargs) File "/home/untitaker/projects/khal/khal/cli.py", line 315, in klist env={"calendars": ctx.obj['conf']['calendars']} File "/home/untitaker/projects/khal/khal/controllers.py", line 262, in khal_list start, end = start_end_from_daterange(daterange, conf['locale'], td) File "/home/untitaker/projects/khal/khal/controllers.py", line 144, in start_end_from_daterange daterange, locale, default_timedelta=default_timedelta) File "/home/untitaker/projects/khal/khal/aux.py", line 350, in guessrangefstr if len(end) == 0: TypeError: object of type 'datetime.datetime' has no len() I feel like it is time to implement #169 using `parsedatetime`.
0.0
6ed1f1b68fe7c1662fef628396d2fbcc0e8b3abe
[ "tests/aux_test.py::TestGuessRangefstr::test_week" ]
[ "tests/aux_test.py::TestIcsFromList::test_ics_from_list", "tests/aux_test.py::TestIcsFromList::test_ics_from_list_random_uid", "tests/aux_test.py::TestTimeDelta2Str::test_multi", "tests/aux_test.py::TestTimeDelta2Str::test_negative", "tests/aux_test.py::TestTimeDelta2Str::test_single", "tests/aux_test.py::test_normalize_component", "tests/aux_test.py::TestGuessRangefstr::test_start_and_end_date", "tests/aux_test.py::TestGuessRangefstr::test_invalid", "tests/aux_test.py::TestGuessRangefstr::test_time_tomorrow", "tests/aux_test.py::TestGuessRangefstr::test_start_and_end_date_time", "tests/aux_test.py::TestGuessRangefstr::test_start_and_eod", "tests/aux_test.py::TestGuessRangefstr::test_tomorrow", "tests/aux_test.py::TestGuessRangefstr::test_start_and_week", "tests/aux_test.py::TestGuessRangefstr::test_today", "tests/aux_test.py::TestGuessDatetimefstr::test_tomorrow", "tests/aux_test.py::TestGuessDatetimefstr::test_time_tomorrow", "tests/aux_test.py::TestGuessDatetimefstr::test_today", "tests/aux_test.py::TestGuessTimedeltafstr::test_garbage", "tests/aux_test.py::TestGuessTimedeltafstr::test_moregarbage", "tests/aux_test.py::TestGuessTimedeltafstr::test_multi_nospace", "tests/aux_test.py::TestGuessTimedeltafstr::test_single", "tests/aux_test.py::TestGuessTimedeltafstr::test_multi", "tests/aux_test.py::TestGuessTimedeltafstr::test_same", "tests/aux_test.py::TestGuessTimedeltafstr::test_negative" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2016-08-29 23:38:27+00:00
mit
4,556
pimutils__khal-495
diff --git a/khal/settings/settings.py b/khal/settings/settings.py index b9fde9d..254e424 100644 --- a/khal/settings/settings.py +++ b/khal/settings/settings.py @@ -31,7 +31,7 @@ from .exceptions import InvalidSettingsError, CannotParseConfigFileError, NoConf from khal import __productname__ from ..log import logger from .utils import is_timezone, weeknumber_option, config_checks, \ - expand_path, expand_db_path, is_color + expand_path, expand_db_path, is_color, get_vdir_type, get_color_from_vdir SPECPATH = os.path.join(os.path.dirname(__file__), 'khal.spec') @@ -66,12 +66,17 @@ def find_configuration_file(): return None -def get_config(config_path=None): +def get_config( + config_path=None, + _get_color_from_vdir=get_color_from_vdir, + _get_vdir_type=get_vdir_type): """reads the config file, validates it and return a config dict :param config_path: path to a custom config file, if none is given the default locations will be searched :type config_path: str + :param _get_color_from_vdir: override get_color_from_vdir for testing purposes + :param _get_vdir_type: override get_vdir_type for testing purposes :returns: configuration :rtype: dict """ @@ -124,7 +129,7 @@ def get_config(config_path=None): if abort or not results: raise InvalidSettingsError() - config_checks(user_config) + config_checks(user_config, _get_color_from_vdir, _get_vdir_type) extras = get_extra_values(user_config) for section, value in extras: diff --git a/khal/settings/utils.py b/khal/settings/utils.py index d7a714c..0470ea4 100644 --- a/khal/settings/utils.py +++ b/khal/settings/utils.py @@ -152,7 +152,10 @@ def get_vdir_type(_): return 'calendar' -def config_checks(config): +def config_checks( + config, + _get_color_from_vdir=get_color_from_vdir, + _get_vdir_type=get_vdir_type): """do some tests on the config we cannot do with configobj's validator""" if len(config['calendars'].keys()) < 1: logger.fatal('Found no calendar section in the config file') @@ -173,8 +176,8 @@ def config_checks(config): config['calendars'].pop(calendar) for vdir in sorted(vdirs): calendar = {'path': vdir, - 'color': get_color_from_vdir(vdir), - 'type': get_vdir_type(vdir), + 'color': _get_color_from_vdir(vdir), + 'type': _get_vdir_type(vdir), 'readonly': False } name = get_unique_name(vdir, config['calendars'].keys()) @@ -186,4 +189,4 @@ def config_checks(config): config['calendars'][calendar]['readonly'] = True if config['calendars'][calendar]['color'] == 'auto': config['calendars'][calendar]['color'] = \ - get_color_from_vdir(config['calendars'][calendar]['path']) + _get_color_from_vdir(config['calendars'][calendar]['path'])
pimutils/khal
7ff941fbb8f294de8eb3e6abaed48014a804f9d0
diff --git a/tests/settings_test.py b/tests/settings_test.py index 9b18bfc..51156b6 100644 --- a/tests/settings_test.py +++ b/tests/settings_test.py @@ -15,7 +15,11 @@ PATH = __file__.rsplit('/', 1)[0] + '/configs/' class TestSettings(object): def test_simple_config(self): - config = get_config(PATH + 'simple.conf') + config = get_config( + PATH + 'simple.conf', + _get_color_from_vdir=lambda x: None, + _get_vdir_type=lambda x: 'calendar', + ) comp_config = { 'calendars': { 'home': {'path': os.path.expanduser('~/.calendars/home/'), @@ -53,7 +57,11 @@ class TestSettings(object): get_config(PATH + 'nocalendars.conf') def test_small(self): - config = get_config(PATH + 'small.conf') + config = get_config( + PATH + 'small.conf', + _get_color_from_vdir=lambda x: None, + _get_vdir_type=lambda x: 'calendar', + ) comp_config = { 'calendars': { 'home': {'path': os.path.expanduser('~/.calendars/home/'),
Testsuite fails when test collections actually exist From https://aur.archlinux.org/packages/khal/#comment-560248 platform linux -- Python 3.5.2, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 rootdir: /tmp/yaourt-tmp-nicolas/aur-khal/src/khal-0.8.3, inifile: plugins: localserver-0.3.5, hypothesis-3.4.2, subtesthack-0.1.1 collected 178 items tests/aux_test.py ..................... tests/backend_test.py .......................... tests/cal_display_test.py ...xxx tests/cli_test.py .....x............. tests/controller_test.py .... tests/event_test.py .............................. tests/khalendar_aux_test.py ................................ tests/khalendar_test.py ...................... tests/settings_test.py F.F..... tests/terminal_test.py ... tests/vtimezone_test.py ... tests/ui/test_calendarwidget.py ... tests/ui/test_widgets.py . ============================================ FAILURES ============================================= _________________________________ TestSettings.test_simple_config _________________________________ self = <tests.settings_test.TestSettings object at 0x7f6799a26240> def test_simple_config(self): config = get_config(PATH + 'simple.conf') comp_config = { 'calendars': { 'home': {'path': os.path.expanduser('~/.calendars/home/'), 'readonly': False, 'color': None, 'type': 'calendar'}, 'work': {'path': os.path.expanduser('~/.calendars/work/'), 'readonly': False, 'color': None, 'type': 'calendar'}, }, 'sqlite': {'path': os.path.expanduser('~/.local/share/khal/khal.db')}, 'locale': { 'local_timezone': pytz.timezone('Europe/Berlin'), 'default_timezone': pytz.timezone('Europe/Berlin'), 'timeformat': '%H:%M', 'dateformat': '%d.%m.', 'longdateformat': '%d.%m.%Y', 'datetimeformat': '%d.%m. %H:%M', 'longdatetimeformat': '%d.%m.%Y %H:%M', 'firstweekday': 0, 'encoding': 'utf-8', 'unicode_symbols': True, 'weeknumbers': False, }, 'default': { 'default_command': 'calendar', 'default_calendar': None, 'show_all_days': False, 'print_new': 'False', 'days': 2, 'highlight_event_days': False } } for key in comp_config: > assert config[key] == comp_config[key] E assert {'home': {'pa...: 'calendar'}} == {'home': {'col...: 'calendar'}} E Differing items: E {'work': {'path': '/home/nicolas/.calendars/work/', 'color': '#E6C800FF', 'readonly': False, 'type': 'calendar'}} != {'work': {'color': None, 'path': '/home/nicolas/.calendars/work/', 'readonly': False, 'type': 'calendar'}} E {'home': {'path': '/home/nicolas/.calendars/home/', 'color': '#882F00FF', 'readonly': False, 'type': 'calendar'}} != {'home': {'color': None, 'path': '/home/nicolas/.calendars/home/', 'readonly': False, 'type': 'calendar'}} E Use -v to get the full diff tests/settings_test.py:50: AssertionError _____________________________________ TestSettings.test_small _____________________________________ self = <tests.settings_test.TestSettings object at 0x7f6799a6cb00> def test_small(self): config = get_config(PATH + 'small.conf') comp_config = { 'calendars': { 'home': {'path': os.path.expanduser('~/.calendars/home/'), 'color': 'dark green', 'readonly': False, 'type': 'calendar'}, 'work': {'path': os.path.expanduser('~/.calendars/work/'), 'readonly': True, 'color': None, 'type': 'calendar'}}, 'sqlite': {'path': os.path.expanduser('~/.local/share/khal/khal.db')}, 'locale': { 'local_timezone': get_localzone(), 'default_timezone': get_localzone(), 'timeformat': '%H:%M', 'dateformat': '%d.%m.', 'longdateformat': '%d.%m.%Y', 'datetimeformat': '%d.%m. 
%H:%M', 'longdatetimeformat': '%d.%m.%Y %H:%M', 'firstweekday': 0, 'encoding': 'utf-8', 'unicode_symbols': True, 'weeknumbers': False, }, 'default': { 'default_calendar': None, 'default_command': 'calendar', 'print_new': 'False', 'show_all_days': False, 'days': 2, 'highlight_event_days': False } } for key in comp_config: > assert config[key] == comp_config[key] E assert {'home': {'pa...: 'calendar'}} == {'home': {'col...: 'calendar'}} E Omitting 1 identical items, use -v to show E Differing items: E {'work': {'path': '/home/nicolas/.calendars/work/', 'readonly': True, 'color': '#E6C800FF', 'type': 'calendar'}} != {'work': {'color': None, 'path': '/home/nicolas/.calendars/work/', 'readonly': True, 'type': 'calendar'}} E Use -v to get the full diff tests/settings_test.py:90: AssertionError ========================= 2 failed, 172 passed, 4 xfailed in 5.65 seconds ========================= ==> ERROR: A failure occurred in check(). Aborting... ==> ERROR: Makepkg was unable to build khal. ==> Restart building khal ? [y/N]
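The fix (the patch at the top of this record) makes the vdir-probing helpers injectable, so the tests no longer depend on whatever calendars happen to exist in the packager's `~/.calendars`. A self-contained sketch of that default-argument injection pattern — the names here are illustrative, not khal's:

```
def _probe_color(path):
    # Stand-in for khal's get_color_from_vdir: in production it inspects the
    # filesystem, which is exactly what made the tests environment-dependent.
    raise RuntimeError('would read the real filesystem')

def config_checks(config, _get_color=_probe_color):
    # Production callers omit _get_color and hit the real probe; tests pass a
    # stub, so results no longer vary with the vdirs present on the machine.
    for calendar in config.values():
        calendar['color'] = _get_color(calendar['path'])

config = {'home': {'path': '/cal/home'}}
config_checks(config, _get_color=lambda path: None)  # hermetic, test-style call
print(config)  # {'home': {'path': '/cal/home', 'color': None}}
```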
0.0
7ff941fbb8f294de8eb3e6abaed48014a804f9d0
[ "tests/settings_test.py::TestSettings::test_simple_config", "tests/settings_test.py::TestSettings::test_small" ]
[ "tests/settings_test.py::TestSettings::test_nocalendars", "tests/settings_test.py::TestSettings::test_old_config", "tests/settings_test.py::TestSettings::test_extra_sections", "tests/settings_test.py::test_discover", "tests/settings_test.py::test_get_unique_name", "tests/settings_test.py::test_config_checks" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2016-08-30 00:20:35+00:00
mit
4,557
pimutils__khal-551
diff --git a/AUTHORS.txt b/AUTHORS.txt index 4c1283d..5123a7c 100644 --- a/AUTHORS.txt +++ b/AUTHORS.txt @@ -28,3 +28,4 @@ Troy Sankey - sankeytms [at] gmail [dot] com Mart Lubbers - mart [at] martlubbers [dot] net Paweł Fertyk - pfertyk [at] openmailbox [dot] org Moritz Kobel - moritz [at] kobelnet [dot] ch - http://www.kobelnet.ch +Guilhem Saurel - guilhem [at] saurel [dot] me - https://saurel.me diff --git a/CHANGELOG.rst b/CHANGELOG.rst index fce5319..4dd4fcc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,6 +31,8 @@ not released yet * `import` can now import multiple files at once (Christian Geier) * configuration file path $XDG_CONFIG_HOME/khal/config is now supported and $XDG_CONFIG_HOME/khal/khal.conf deprecated +* events that start and end at the same time are now displayed as if their + duration was one hour instead of one day (Guilhem Saurel) ikhal ----- diff --git a/khal/khalendar/utils.py b/khal/khalendar/utils.py index b03d7da..27e16bc 100644 --- a/khal/khalendar/utils.py +++ b/khal/khalendar/utils.py @@ -189,7 +189,7 @@ def sanitize(vevent, default_timezone, href='', calendar=''): def sanitize_timerange(dtstart, dtend, duration=None): '''return sensible dtstart and end for events that have an invalid or - missing DTEND, assuming the event just lasts one day.''' + missing DTEND, assuming the event just lasts one hour.''' if isinstance(dtstart, datetime) and isinstance(dtend, datetime): if dtstart.tzinfo and not dtend.tzinfo: @@ -214,7 +214,11 @@ def sanitize_timerange(dtstart, dtend, duration=None): raise ValueError('The event\'s end time (DTEND) is older than ' 'the event\'s start time (DTSTART).') elif dtend == dtstart: - dtend += timedelta(days=1) + logger.warning( + "Event start time and end time are the same. " + "Assuming the event's duration is one hour." + ) + dtend += timedelta(hours=1) return dtstart, dtend
pimutils/khal
52b9da07a2a94ccf9c0d05aaacb633e14378aee5
diff --git a/tests/khalendar_aux_test.py b/tests/khalendar_aux_test.py index dbe60e9..6fe9316 100644 --- a/tests/khalendar_aux_test.py +++ b/tests/khalendar_aux_test.py @@ -736,6 +736,17 @@ END:VEVENT END:VCALENDAR """ +instant = """ +BEGIN:VCALENDAR +BEGIN:VEVENT +UID:instant123 +DTSTART;TZID=Europe/Berlin;VALUE=DATE-TIME:20170113T010000 +DTEND;TZID=Europe/Berlin;VALUE=DATE-TIME:20170113T010000 +SUMMARY:Really fast event +END:VEVENT +END:VCALENDAR +""" + class TestSanitize(object): @@ -754,3 +765,9 @@ class TestSanitize(object): def test_duration(self): vevent = _get_vevent_file('event_dtr_exdatez') vevent = utils.sanitize(vevent, berlin, '', '') + + def test_instant(self): + vevent = _get_vevent(instant) + assert vevent['DTEND'].dt - vevent['DTSTART'].dt == timedelta() + vevent = utils.sanitize(vevent, berlin, '', '') + assert vevent['DTEND'].dt - vevent['DTSTART'].dt == timedelta(hours=1)
iCal, DTSTART == DTEND Currently, when DTSTART == DTEND khal shows things like: ``` Today: 20:00→ : "blah foo bar" Tomorrow: → 20:00: "blah foo bar" ``` Google Agenda assumes that the event lasts 1 hour. Neither behavior is perfect, and both are better than omitting the event. But khal's output seems more confusing: by assuming the event lasts 24h, it gives the visual impression that 2 distinct events happen (especially if other events happen in the timespan). Assuming the event lasts until the end of the day, or, like Google, 1 hour, could be a better trade-off.
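The adopted behaviour (patch above) pads zero-length events to one hour rather than one day. A self-contained sketch of that sanitizing step, including khal's existing error path for inverted ranges:

```
import datetime as dt

def sanitize_timerange(dtstart, dtend):
    # Mirrors khal's eventual behaviour: reject inverted ranges, pad
    # zero-length ("instant") events to one hour instead of one day.
    if dtend < dtstart:
        raise ValueError("The event's end time (DTEND) is older than "
                         "the event's start time (DTSTART).")
    if dtend == dtstart:
        dtend += dt.timedelta(hours=1)
    return dtstart, dtend

start = dt.datetime(2017, 1, 13, 1, 0)
print(sanitize_timerange(start, start))
# (datetime(2017, 1, 13, 1, 0), datetime(2017, 1, 13, 2, 0))
```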
0.0
52b9da07a2a94ccf9c0d05aaacb633e14378aee5
[ "tests/khalendar_aux_test.py::TestSanitize::test_instant" ]
[ "tests/khalendar_aux_test.py::TestExpand::test_expand_dt", "tests/khalendar_aux_test.py::TestExpand::test_expand_dtb", "tests/khalendar_aux_test.py::TestExpand::test_expand_dttz", "tests/khalendar_aux_test.py::TestExpand::test_expand_dtf", "tests/khalendar_aux_test.py::TestExpand::test_expand_d", "tests/khalendar_aux_test.py::TestExpand::test_expand_dtz", "tests/khalendar_aux_test.py::TestExpand::test_expand_dtzb", "tests/khalendar_aux_test.py::TestExpandNoRR::test_expand_dt", "tests/khalendar_aux_test.py::TestExpandNoRR::test_expand_dtb", "tests/khalendar_aux_test.py::TestExpandNoRR::test_expand_dttz", "tests/khalendar_aux_test.py::TestExpandNoRR::test_expand_dtf", "tests/khalendar_aux_test.py::TestExpandNoRR::test_expand_d", "tests/khalendar_aux_test.py::TestExpandNoRR::test_expand_rrule_exdate_z", "tests/khalendar_aux_test.py::TestSpecial::test_count", "tests/khalendar_aux_test.py::TestSpecial::test_until_d_notz", "tests/khalendar_aux_test.py::TestSpecial::test_latest_bug", "tests/khalendar_aux_test.py::TestSpecial::test_another_problem", "tests/khalendar_aux_test.py::TestSpecial::test_event_exdate_dt", "tests/khalendar_aux_test.py::TestSpecial::test_event_exdates_dt", "tests/khalendar_aux_test.py::TestSpecial::test_event_exdatesl_dt", "tests/khalendar_aux_test.py::TestSpecial::test_event_dt_rrule_invalid_until", "tests/khalendar_aux_test.py::TestSpecial::test_event_dt_rrule_invalid_until2", "tests/khalendar_aux_test.py::TestRDate::test_simple_rdate", "tests/khalendar_aux_test.py::TestRDate::test_rrule_and_rdate", "tests/khalendar_aux_test.py::TestRDate::test_rrule_past", "tests/khalendar_aux_test.py::TestRDate::test_rdate_date", "tests/khalendar_aux_test.py::TestSanitize::test_noend_date", "tests/khalendar_aux_test.py::TestSanitize::test_noend_datetime", "tests/khalendar_aux_test.py::TestSanitize::test_duration" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-01-13 00:16:36+00:00
mit
4,558
pimutils__khal-857
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 45dc5b3..b8b6417 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,13 @@ Package maintainers and users who have to manually update their installation may want to subscribe to `GitHub's tag feed <https://github.com/geier/khal/tags.atom>`_. + +0.10.1 +====== +2019-03-30 + +* FIX error with the new color priority system and `discover` calendar type + 0.10.0 ====== 2019-03-25 diff --git a/khal/settings/utils.py b/khal/settings/utils.py index ca03794..ed20432 100644 --- a/khal/settings/utils.py +++ b/khal/settings/utils.py @@ -226,7 +226,8 @@ def config_checks( calendar = {'path': vdir, 'color': _get_color_from_vdir(vdir), 'type': _get_vdir_type(vdir), - 'readonly': False + 'readonly': False, + 'priority': 10, } # get color from config if not defined in vdir
pimutils/khal
208faf23b56628907e9b8d104fddf293137441ab
diff --git a/tests/settings_test.py b/tests/settings_test.py index 037abac..a45fcf0 100644 --- a/tests/settings_test.py +++ b/tests/settings_test.py @@ -225,60 +225,70 @@ def test_config_checks(metavdirs): 'path': '/cal3/home', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'my calendar': { 'color': 'dark blue', 'path': '/cal1/public', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'my private calendar': { 'color': '#FF00FF', 'path': '/cal1/private', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'public': { 'color': None, 'path': '/cal2/public', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'public1': { 'color': None, 'path': '/cal3/public', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'work': { 'color': None, 'path': '/cal3/work', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'cfgcolor': { 'color': 'dark blue', 'path': '/cal4/cfgcolor', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'dircolor': { 'color': 'dark blue', 'path': '/cal4/dircolor', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'cfgcolor_again': { 'color': 'dark blue', 'path': '/cal4/cfgcolor_again', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, 'cfgcolor_once_more': { 'color': 'dark blue', 'path': '/cal4/cfgcolor_once_more', 'readonly': False, 'type': 'calendar', + 'priority': 10, }, },
Khal crashes on startup due to new priority system code

Up until a few minutes ago I was using khal version `0.9.10`. I then upgraded to `0.10.0`, and now it throws an exception on launch, due to what appears (from the backtrace) to be the code added in #812:

```
Traceback (most recent call last):
  File "/home/linuxbrew/.linuxbrew/bin/ikhal", line 10, in <module>
    sys.exit(main_ikhal())
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/click/core.py", line 764, in __call__
    return self.main(*args, **kwargs)
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/click/core.py", line 717, in main
    rv = self.invoke(ctx)
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/click/core.py", line 956, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/click/core.py", line 555, in invoke
    return callback(*args, **kwargs)
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/click/decorators.py", line 17, in new_func
    return f(get_current_context(), *args, **kwargs)
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/khal/cli.py", line 481, in interactive_cli
    multi_calendar_select(ctx, include_calendar, exclude_calendar)
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/khal/cli.py", line 165, in build_collection
    'priority': cal['priority'],
  File "/home/linuxbrew/.linuxbrew/lib/python3.7/site-packages/configobj-5.0.6-py3.7.egg/configobj.py", line 554, in __getitem__
KeyError: 'priority'
```

My khal config has a bunch of calendar entries, none of which contains a `priority` property/key. I did test adding the key, with the default setting, to all of my calendars just to see if that fixed it, but khal still got hung up on this error. My config is back to not including the new `priority` keys, and for now I'm going to roll back to 0.9.10 via pip, as that still seems to work okay.
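A toy sketch of the fix above — not khal's actual `config_checks` code — showing why an explicit default avoids the `KeyError` when later code does `cal['priority']`:

```python
DEFAULT_PRIORITY = 10  # default value used by the fix in this row

def make_discovered_calendar(vdir, colour=None):
    # Every calendar discovered from a vdir gets an explicit 'priority'
    # key, so later lookups like cal['priority'] cannot raise KeyError.
    return {
        'path': vdir,
        'color': colour,
        'type': 'calendar',
        'readonly': False,
        'priority': DEFAULT_PRIORITY,
    }

cal = make_discovered_calendar('/cal1/private', colour='#FF00FF')
assert cal['priority'] == 10
```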
0.0
208faf23b56628907e9b8d104fddf293137441ab
[ "tests/settings_test.py::test_config_checks" ]
[ "tests/settings_test.py::TestSettings::test_simple_config", "tests/settings_test.py::TestSettings::test_nocalendars", "tests/settings_test.py::TestSettings::test_one_level_calendar", "tests/settings_test.py::TestSettings::test_small", "tests/settings_test.py::TestSettings::test_old_config", "tests/settings_test.py::TestSettings::test_extra_sections", "tests/settings_test.py::TestSettings::test_default_calendar_readonly", "tests/settings_test.py::test_broken_color", "tests/settings_test.py::test_discover", "tests/settings_test.py::test_get_unique_name", "tests/settings_test.py::test_is_color" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2019-03-30 16:09:15+00:00
mit
4,559
pimutils__todoman-170
diff --git a/todoman/ui.py b/todoman/ui.py index 8e6c583..3364435 100644 --- a/todoman/ui.py +++ b/todoman/ui.py @@ -314,13 +314,15 @@ class TodoFormatter: ))) def parse_priority(self, priority): + if priority is None or priority is '': + return None if priority == 'low': return 9 elif priority == 'medium': return 5 elif priority == 'high': return 4 - elif priority == 'none' or priority is None: + elif priority == 'none': return 0 else: raise ValueError('Priority has to be one of low, medium,' @@ -392,7 +394,7 @@ class PorcelainFormatter(TodoFormatter): def parse_priority(self, priority): if priority is None: - return 0 + return None try: if int(priority) in range(0, 10): return int(priority)
pimutils/todoman
711690c512427bfddffc563d04d3982fb76a1f46
diff --git a/tests/conftest.py b/tests/conftest.py index 9cfed4a..a51e9ca 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -69,6 +69,19 @@ def now_for_tz(): return inner [email protected] +def todo_factory(default_database): + def inner(): + todo = model.FileTodo() + todo.list = list(default_database.lists())[0] + todo.summary = 'YARR!' + todo.save() + + return todo + + return inner + + settings.register_profile("ci", settings( max_examples=1000, verbosity=Verbosity.verbose, diff --git a/tests/test_ui.py b/tests/test_ui.py index 67b804f..57f8680 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -1,26 +1,24 @@ -from todoman.model import FileTodo -from todoman.ui import PorcelainFormatter, TodoEditor +import pytest +from urwid import ExitMainLoop +from todoman.ui import TodoEditor, TodoFormatter -def test_todo_editor(default_database): - """ - Tests TodoEditor +DATE_FORMAT = "%d-%m-%y" +TIME_FORMAT = "%H:%M" - While this is a pretty lame test, it's a lot better than nothing until we - have a proper testing framework for the interactive parts. - - It basically makes sure that we don't refer to any obsolete methods, etc. - """ +def test_todo_editor_priority(default_database, todo_factory): + todo = todo_factory() lists = list(default_database.lists()) + formatter = TodoFormatter(DATE_FORMAT, TIME_FORMAT, '') - todo = FileTodo(new=True) - todo.list = lists[0] - todo.summary = 'YARR!' - todo.save() - - porcelain_formatter = PorcelainFormatter() + editor = TodoEditor(todo, lists, formatter) + editor._priority.edit_text = '' - editor = TodoEditor(todo, lists, porcelain_formatter) + with pytest.raises(ExitMainLoop): # Look at editor._msg_text if this fails + editor._keypress('ctrl s') - editor._keypress('ctrl s') + # FileTodo exposes 0 + assert todo.priority is 0 + # The actual todo contains None + assert todo.todo.get('priority', None) is None
TodoEditor requires setting a non-empty priority when saving

It should default to `none`.
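A minimal sketch of the patched mapping, with the key distinction the fix introduces: an empty value means "leave the priority unset" (`None`), while an explicit `'none'` still maps to priority `0`:

```python
def parse_priority(priority):
    # Empty input: the user did not set a priority at all.
    if priority is None or priority == '':
        return None
    mapping = {'low': 9, 'medium': 5, 'high': 4, 'none': 0}
    try:
        return mapping[priority]
    except KeyError:
        raise ValueError('Priority has to be one of low, medium, high or none')

assert parse_priority('') is None   # editor field left empty
assert parse_priority('none') == 0  # user explicitly chose "none"
assert parse_priority('high') == 4
```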
0.0
711690c512427bfddffc563d04d3982fb76a1f46
[ "tests/test_ui.py::test_todo_editor_priority" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2017-03-04 01:29:23+00:00
isc
4,560
pimutils__todoman-299
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index df76db6..e2fcaf6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,7 @@ releases, in reverse chronological order. v3.6.0 ------ * Allow passing a custom configuration file with the ``--config/-c`` option. +* Cached list metadata is now invalidated when it has changed on-disk. v3.5.0 ------ diff --git a/todoman/model.py b/todoman/model.py index 10a71b4..f4e4bda 100644 --- a/todoman/model.py +++ b/todoman/model.py @@ -395,7 +395,7 @@ class Cache: may be used for filtering/sorting. """ - SCHEMA_VERSION = 5 + SCHEMA_VERSION = 6 def __init__(self, path): self.cache_path = str(path) @@ -445,6 +445,8 @@ class Cache: "name" TEXT PRIMARY KEY, "path" TEXT, "colour" TEXT, + "mtime" INTEGER, + CONSTRAINT path_unique UNIQUE (path) ); ''' @@ -496,7 +498,7 @@ class Cache: os.remove(self.cache_path) self._conn = None - def add_list(self, name, path, colour): + def add_list(self, name, path, colour, mtime): """ Inserts a new list into the cache. @@ -513,17 +515,25 @@ class Cache: try: self._conn.execute( - "INSERT INTO lists (name, path, colour) VALUES (?, ?, ?)", + ''' + INSERT INTO lists ( + name, + path, + colour, + mtime + ) VALUES (?, ?, ?, ?) + ''', ( name, path, colour, + mtime, ), ) except sqlite3.IntegrityError as e: raise exceptions.AlreadyExists('list', name) from e - return self.add_list(name, path, colour) + return self.add_list(name, path, colour, mtime) def add_file(self, list_name, path, mtime): try: @@ -824,10 +834,14 @@ class Cache: return {l.name: l for l in self.lists()} def expire_lists(self, paths): - results = self._conn.execute("SELECT path, name from lists") + results = self._conn.execute("SELECT path, name, mtime from lists") for result in results: if result['path'] not in paths: self.delete_list(result['name']) + else: + mtime = paths.get(result['path']) + if mtime and mtime > result['mtime']: + self.delete_list(result['name']) def delete_list(self, name): self._conn.execute("DELETE FROM lists WHERE lists.name = ?", (name,)) @@ -894,6 +908,22 @@ class List: except (OSError, IOError): return split(normpath(path))[1] + @staticmethod + def mtime_for_path(path): + colour_file = os.path.join(path, 'color') + display_file = os.path.join(path, 'displayname') + + mtimes = [] + if os.path.exists(colour_file): + mtimes.append(_getmtime(colour_file)) + if os.path.exists(display_file): + mtimes.append(_getmtime(display_file)) + + if mtimes: + return max(mtimes) + else: + return 0 + def __eq__(self, other): if isinstance(other, List): return self.name == other.name @@ -918,7 +948,8 @@ class Database: self.update_cache() def update_cache(self): - self.cache.expire_lists(self.paths) + paths = {path: List.mtime_for_path(path) for path in self.paths} + self.cache.expire_lists(paths) paths_to_mtime = {} paths_to_list_name = {} @@ -928,6 +959,7 @@ class Database: List.name_for_path(path), path, List.colour_for_path(path), + paths[path], ) for entry in os.listdir(path): if not entry.endswith('.ics'):
pimutils/todoman
d6bef7a2ef6f5a729db965dadac38d753b227831
diff --git a/tests/test_model.py b/tests/test_model.py index 64ed5e9..1b60f39 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -101,6 +101,27 @@ def test_list_colour(tmpdir): assert list_.colour == '#8ab6d2' +def test_list_colour_cache_invalidation(tmpdir, sleep): + tmpdir.join('default').mkdir() + with tmpdir.join('default').join('color').open('w') as f: + f.write('#8ab6d2') + + db = Database([tmpdir.join('default')], tmpdir.join('cache.sqlite3')) + list_ = next(db.lists()) + + assert list_.colour == '#8ab6d2' + + sleep() + + with tmpdir.join('default').join('color').open('w') as f: + f.write('#f874fd') + + db = Database([tmpdir.join('default')], tmpdir.join('cache.sqlite3')) + list_ = next(db.lists()) + + assert list_.colour == '#f874fd' + + def test_list_no_colour(tmpdir): tmpdir.join('default').mkdir()
List metadata is cached forever

If the metadata (e.g. colour) for a cached list changes, we never re-read it. We might just want to save `max(colour_mtime, displayname_mtime)` for lists, and expire their cache entry if that changes.
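A sketch of the invalidation key suggested above — the newest mtime among the files holding a list's metadata, with 0 when neither file exists (mirroring `List.mtime_for_path` in the patch):

```python
import os

def metadata_mtime(list_path):
    # Newest modification time of the 'color' and 'displayname' files;
    # 0 if neither exists. If this value grows past the cached one, the
    # cached list entry should be expired and re-read from disk.
    candidates = (os.path.join(list_path, 'color'),
                  os.path.join(list_path, 'displayname'))
    mtimes = [os.path.getmtime(f) for f in candidates if os.path.exists(f)]
    return max(mtimes, default=0)
```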
0.0
d6bef7a2ef6f5a729db965dadac38d753b227831
[ "tests/test_model.py::test_list_colour_cache_invalidation" ]
[ "tests/test_model.py::test_todo_setters", "tests/test_model.py::test_cached_property_overwriting", "tests/test_model.py::test_list_equality", "tests/test_model.py::test_save_recurring_related", "tests/test_model.py::test_unreadable_ics", "tests/test_model.py::test_retain_unknown_fields", "tests/test_model.py::test_retain_tz", "tests/test_model.py::test_illegal_start_suppression", "tests/test_model.py::test_complete_recurring[True-None-20990315T020000]", "tests/test_model.py::test_is_completed", "tests/test_model.py::test_filename_uid_colision", "tests/test_model.py::test_todos_startable", "tests/test_model.py::test_default_status", "tests/test_model.py::test_querying", "tests/test_model.py::test_hide_cancelled", "tests/test_model.py::test_complete_recurring[True-tz0-20990315T020000Z]", "tests/test_model.py::test_complete_recurring[False-None-20990315T020000]", "tests/test_model.py::test_complete_recurring[False-tz0-20990315T020000Z]", "tests/test_model.py::test_cached_property_caching", "tests/test_model.py::test_clone", "tests/test_model.py::test_due_date", "tests/test_model.py::test_nullify_field", "tests/test_model.py::test_change_paths", "tests/test_model.py::test_cached_property_property", "tests/test_model.py::test_todo_filename_absolute_path", "tests/test_model.py::test_list_colour", "tests/test_model.py::test_duplicate_list", "tests/test_model.py::test_list_no_colour", "tests/test_model.py::test_database_priority_sorting", "tests/test_model.py::test_list_displayname" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-01-21 04:36:18+00:00
isc
4,561
pimutils__todoman-68
diff --git a/todoman/cli.py b/todoman/cli.py index e59f488..0703c85 100644 --- a/todoman/cli.py +++ b/todoman/cli.py @@ -304,7 +304,13 @@ def move(ctx, list, ids): @click.option('--reverse/--no-reverse', default=True, help='Sort tasks in reverse order (see --sort). ' 'Defaults to true.') -def list(ctx, lists, all, urgent, location, category, grep, sort, reverse): [email protected]('--due', default=None, help='Only show tasks due in DUE hours', + type=int) +# TODO: we might want a `porcelain` flag here to print this is a +# machine-friendly format that NEVER CHANGES! +def list( + ctx, lists, all, urgent, location, category, grep, sort, reverse, due, + ): """ List unfinished tasks. @@ -323,6 +329,7 @@ def list(ctx, lists, all, urgent, location, category, grep, sort, reverse): db = ctx.obj['db'] todos = db.todos( + due=due, all=all, category=category, grep=grep, diff --git a/todoman/model.py b/todoman/model.py index 91e515f..b9293bf 100644 --- a/todoman/model.py +++ b/todoman/model.py @@ -502,7 +502,7 @@ class Cache: ) def todos(self, all=False, lists=[], urgent=False, location='', - category='', grep='', sort=[], reverse=True): + category='', grep='', sort=[], reverse=True, due=None): list_map = {list.name: list for list in self.lists()} extra_where = [] @@ -532,6 +532,10 @@ class Cache: # params.append(grep) extra_where.append('AND summary LIKE ?') params.append('%{}%'.format(grep)) + if due: + max_due = datetime.now() + timedelta(hours=due) + extra_where.append('AND due IS NOT NULL AND due < ?') + params.append(max_due) if sort: order = []
pimutils/todoman
e6c27480329dceb6182454e06b39a5270317c3b9
diff --git a/tests/test_filtering.py b/tests/test_filtering.py index c8bd520..c1a234f 100644 --- a/tests/test_filtering.py +++ b/tests/test_filtering.py @@ -1,4 +1,7 @@ +from datetime import datetime, timedelta + from todoman.cli import cli +from todoman.model import Database def test_all(tmpdir, runner, create): @@ -154,3 +157,45 @@ def test_filtering_lists(tmpdir, runner, create): assert not result.exception assert len(result.output.splitlines()) == 1 assert 'todo two' in result.output + + +def test_due_aware(tmpdir, runner, create): + now = datetime.now() + + for i in [1, 23, 25, 48]: + due = now + timedelta(hours=i) + create( + 'test_{}.ics'.format(i), + 'SUMMARY:{}\n' + 'DUE;VALUE=DATE-TIME;TZID=CET:{}\n'.format( + i, due.strftime("%Y%m%dT%H%M%S"), + ) + ) + + db = Database([tmpdir.join('default')], tmpdir.join('cache.sqlite')) + todos = list(db.todos(due=24)) + + assert len(todos) == 2 + assert todos[0].summary == "23" + assert todos[1].summary == "1" + + +def test_due_naive(tmpdir, runner, create): + now = datetime.now() + + for i in [1, 23, 25, 48]: + due = now + timedelta(hours=i) + create( + 'test_{}.ics'.format(i), + 'SUMMARY:{}\n' + 'DUE;VALUE=DATE-TIME:{}\n'.format( + i, due.strftime("%Y%m%dT%H%M%S"), + ) + ) + + db = Database([tmpdir.join('default')], tmpdir.join('cache.sqlite')) + todos = list(db.todos(due=24)) + + assert len(todos) == 2 + assert todos[0].summary == "23" + assert todos[1].summary == "1"
List by "today", "this week"

When I use `todo list` I get a very long list of all my tasks. I know I can filter by lists, but is it possible to filter by "this week", "today", "overdue"? I'd like to have a short list of today's (and overdue) tasks, so I get a good feeling when I've done everything for today :-)
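A sketch of the `--due` filter the patch adds, expressed as a plain Python filter rather than the SQL `WHERE` clause used in the cache (`due IS NOT NULL AND due < ?`):

```python
from datetime import datetime, timedelta

def filter_due(todos, hours):
    # Keep todos whose due date falls within the next `hours` hours;
    # overdue todos also pass, since their due date is already in the past.
    # Todos without a due date are excluded, as in the SQL version.
    max_due = datetime.now() + timedelta(hours=hours)
    return [(summary, due) for summary, due in todos
            if due is not None and due < max_due]

now = datetime.now()
todos = [('soon', now + timedelta(hours=1)),
         ('later', now + timedelta(hours=48)),
         ('undated', None)]
print(filter_due(todos, 24))  # only ('soon', ...) remains
```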
0.0
e6c27480329dceb6182454e06b39a5270317c3b9
[ "tests/test_filtering.py::test_due_naive", "tests/test_filtering.py::test_due_aware" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-01-12 23:21:25+00:00
isc
4,562
pints-team__pints-1115
diff --git a/pints/_mcmc/__init__.py b/pints/_mcmc/__init__.py index c6090196..fa5c8853 100644 --- a/pints/_mcmc/__init__.py +++ b/pints/_mcmc/__init__.py @@ -418,6 +418,9 @@ class MCMCController(object): self._n_workers = 1 self.set_parallel() + # :meth:`run` can only be called once + self._has_run = False + # # Stopping criteria # @@ -507,6 +510,12 @@ class MCMCController(object): If storing chains to memory has been disabled with :meth:`set_chain_storage`, then ``None`` is returned instead. """ + + # Can only run once for each controller instance + if self._has_run: + raise RuntimeError("Controller is valid for single use only") + self._has_run = True + # Check stopping criteria has_stopping_criterion = False has_stopping_criterion |= (self._max_iterations is not None) diff --git a/pints/_nested/__init__.py b/pints/_nested/__init__.py index f19315e6..d06a7ccf 100644 --- a/pints/_nested/__init__.py +++ b/pints/_nested/__init__.py @@ -315,6 +315,9 @@ class NestedController(object): # Performance metrics self._time = None + # :meth:`run` can only be called once + self._has_run = False + def active_points(self): """ Returns the active points from nested sampling. @@ -522,6 +525,11 @@ class NestedController(object): samples and an estimate of the marginal likelihood. """ + # Can only run once for each controller instance + if self._has_run: + raise RuntimeError("Controller is valid for single use only") + self._has_run = True + # Choose method to evaluate f = self._initialise_callable() diff --git a/pints/_optimisers/__init__.py b/pints/_optimisers/__init__.py index 66dc177f..6b72063e 100644 --- a/pints/_optimisers/__init__.py +++ b/pints/_optimisers/__init__.py @@ -382,6 +382,9 @@ class OptimisationController(object): self._n_workers = 1 self.set_parallel() + # :meth:`run` can only be called once + self._has_run = False + # # Stopping criteria # @@ -452,6 +455,11 @@ class OptimisationController(object): """ Runs the optimisation, returns a tuple ``(xbest, fbest)``. """ + # Can only run once for each controller instance + if self._has_run: + raise RuntimeError("Controller is valid for single use only") + self._has_run = True + # Check stopping criteria has_stopping_criterion = False has_stopping_criterion |= (self._max_iterations is not None)
pints-team/pints
ad9f037ac8a62095825a887fbe87d49d978e9830
diff --git a/pints/tests/test_mcmc_controller.py b/pints/tests/test_mcmc_controller.py index 963a6a35..8657a3ac 100755 --- a/pints/tests/test_mcmc_controller.py +++ b/pints/tests/test_mcmc_controller.py @@ -379,6 +379,11 @@ class TestMCMCController(unittest.TestCase): self.assertEqual(chains.shape[2], nparameters) # Test with fixed number of worker processes + mcmc = pints.MCMCController( + self.log_posterior, nchains, xs, + method=pints.HaarioBardenetACMC) + mcmc.set_max_iterations(niterations) + mcmc.set_log_to_screen(debug) mcmc.set_parallel(5) mcmc.set_log_to_screen(True) self.assertIs(mcmc._parallel, True) @@ -614,6 +619,22 @@ class TestMCMCController(unittest.TestCase): self.log_posterior, 1, [self.real_parameters]) self.assertIsInstance(mcmc, pints.MCMCController) + def test_exception_on_multi_use(self): + # Controller should raise an exception if use multiple times + + # Test simple run + n_chains = 1 + n_iterations = 10 + x0 = np.array(self.real_parameters) * 1.1 + xs = [x0] + mcmc = pints.MCMCController(self.log_posterior, n_chains, xs) + mcmc.set_max_iterations(n_iterations) + mcmc.set_log_to_screen(False) + mcmc.run() + with self.assertRaisesRegex( + RuntimeError, 'Controller is valid for single use only'): + mcmc.run() + def test_post_run_statistics(self): # Test method to obtain post-run statistics diff --git a/pints/tests/test_nested_controller.py b/pints/tests/test_nested_controller.py index ed8b1505..491abb0b 100755 --- a/pints/tests/test_nested_controller.py +++ b/pints/tests/test_nested_controller.py @@ -131,6 +131,8 @@ class TestNestedController(unittest.TestCase): sampler.run() # Test with fixed number of worker processes + sampler = pints.NestedController( + self.log_likelihood, self.log_prior) sampler.set_parallel(4) sampler.set_log_to_screen(False) self.assertEqual(sampler.parallel(), 4) @@ -248,6 +250,8 @@ class TestNestedController(unittest.TestCase): logLikelihood1 = sampler.log_likelihood_vector() self.assertEqual(len(logLikelihood1), 400 + 100) self.assertTrue(ess1 > 0) + sampler = pints.NestedController( + self.log_likelihood, self.log_prior) iter = 2000 sampler.set_iterations(iter) sampler.set_n_posterior_samples(100) @@ -277,6 +281,8 @@ class TestNestedController(unittest.TestCase): self.assertTrue(elem <= 1) # Acive points + sampler = pints.NestedController( + self.log_likelihood, self.log_prior) sampler.set_iterations(100) sampler.set_log_to_screen(False) sampler.set_parallel(2) @@ -324,6 +330,19 @@ class TestNestedController(unittest.TestCase): m_inactive = sampler.inactive_points() self.assertTrue(m_inactive.shape[0] < 200) + def test_exception_on_multi_use(self): + # Controller should raise an exception if use multiple times + + sampler = pints.NestedController( + self.log_likelihood, self.log_prior) + sampler.set_n_posterior_samples(2) + sampler.set_iterations(10) + sampler.set_log_to_screen(False) + sampler.run() + with self.assertRaisesRegex( + RuntimeError, 'Controller is valid for single use only'): + sampler.run() + if __name__ == '__main__': unittest.main() diff --git a/pints/tests/test_opt_optimisation_controller.py b/pints/tests/test_opt_optimisation_controller.py index e5a78041..05739797 100755 --- a/pints/tests/test_opt_optimisation_controller.py +++ b/pints/tests/test_opt_optimisation_controller.py @@ -302,6 +302,22 @@ class TestOptimisationController(unittest.TestCase): self.assertGreater(opt.time(), 0) self.assertGreater(t_upper, opt.time()) + def test_exception_on_multi_use(self): + # Controller should raise an exception if use multiple 
times + + r = pints.toy.TwistedGaussianLogPDF(2, 0.01) + x = np.array([0, 1.01]) + b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) + s = 0.01 + opt = pints.OptimisationController(r, x, s, b, method=method) + opt.set_log_to_screen(False) + opt.set_max_unchanged_iterations(None) + opt.set_max_iterations(10) + opt.run() + with self.assertRaisesRegex(RuntimeError, + "Controller is valid for single use only"): + opt.run() + if __name__ == '__main__': print('Add -v for more debug output')
Make controllers raise exception if used twice

Now that we've decided they're officially single-use objects. See: #640
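The guard the patch adds is small; a self-contained sketch of the same pattern:

```python
class SingleUseController:
    # Sketch of the guard added in this row's patch: run() may only be
    # called once per controller instance.
    def __init__(self):
        self._has_run = False

    def run(self):
        if self._has_run:
            raise RuntimeError('Controller is valid for single use only')
        self._has_run = True
        # ... the actual optimisation / sampling loop would run here ...

c = SingleUseController()
c.run()
try:
    c.run()
except RuntimeError as e:
    print(e)  # Controller is valid for single use only
```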
0.0
ad9f037ac8a62095825a887fbe87d49d978e9830
[ "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_exception_on_multi_use" ]
[ "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_multiple_samplers_mixed_index_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_multiple_samplers_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_multiple_samplers_same_index_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_one_sampler_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_one_sampler_with_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_multi_chain_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_multi_chain_with_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_single_chain_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_single_chain_with_nones", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_deprecated_alias", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_logging", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_optimise", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_parallel", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_post_run_statistics", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_set_population_size", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_max_iterations", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_max_unchanged", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_no_criterion", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_threshold", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_transform" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2020-04-27 16:13:34+00:00
bsd-3-clause
4,563
pints-team__pints-1397
diff --git a/CHANGELOG.md b/CHANGELOG.md index db8dc7a6..f1bd4406 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ All notable changes to this project will be documented in this file. ## [Unreleased] ### Added +- [#1409](https://github.com/pints-team/pints/pull/1409) The `OptimisationController` now accepts a callback function that will be called at every iteration; this can be used for easier customisation or visualisation of the optimiser trajectory. - [#1383](https://github.com/pints-team/pints/pull/1383) Added a method `toy.TwistedGaussianDistribution.untwist` that turns samples from this distribution into samples from a multivariate Gaussian. - [#1322](https://github.com/pints-team/pints/pull/1322) Added a method `sample_initial_points` that allows users to generate random points with finite metrics (either log-probabilities or error measures) to use as starting points for sampling or optimisation. - [#1243](https://github.com/pints-team/pints/pull/1243) Added testing for Python 3.9. diff --git a/pints/_optimisers/__init__.py b/pints/_optimisers/__init__.py index 9358ed24..1f2f87dd 100644 --- a/pints/_optimisers/__init__.py +++ b/pints/_optimisers/__init__.py @@ -380,6 +380,9 @@ class OptimisationController(object): self._n_workers = 1 self.set_parallel() + # User callback + self._callback = None + # :meth:`run` can only be called once self._has_run = False @@ -452,6 +455,11 @@ class OptimisationController(object): def run(self): """ Runs the optimisation, returns a tuple ``(xbest, fbest)``. + + An optional ``callback`` function can be passed in that will be called + at the end of every iteration. The callback should take the arguments + ``(iteration, optimiser)``, where ``iteration`` is the iteration count + (an integer) and ``optimiser`` is the optimiser object. """ # Can only run once for each controller instance if self._has_run: @@ -627,6 +635,9 @@ class OptimisationController(object): running = False halt_message = ('Halting: ' + str(error)) + elif self._callback is not None: + self._callback(iteration - 1, self._optimiser) + except (Exception, SystemExit, KeyboardInterrupt): # pragma: no cover # Unexpected end! # Show last result and exit @@ -670,6 +681,25 @@ class OptimisationController(object): # Return best position and score return xbest, fbest_user + def set_callback(self, cb=None): + """ + Allows a "callback" function to be passed in that will be called at the + end of every iteration. + + This can be used for e.g. visualising optimiser progress. + + Example:: + + def cb(opt): + plot(opt.xbest()) + + opt.set_callback(cb) + + """ + if cb is not None and not callable(cb): + raise ValueError('The argument cb must be None or a callable.') + self._callback = cb + def set_log_interval(self, iters=20, warm_up=3): """ Changes the frequency with which messages are logged. diff --git a/setup.py b/setup.py index 3b2d6ba9..f6158260 100644 --- a/setup.py +++ b/setup.py @@ -41,8 +41,8 @@ setup( # Maintainer information # author='', # author_email='', - maintainer='Michael Clerx', - maintainer_email='[email protected]', + maintainer='PINTS Team', + maintainer_email='[email protected]', url='https://github.com/pints-team/pints', # Packages to include
pints-team/pints
d64eb05fec565f811b5cf2c8e50a81654b175ab1
diff --git a/pints/tests/test_opt_optimisation_controller.py b/pints/tests/test_opt_optimisation_controller.py index 37ec7fde..81c26191 100755 --- a/pints/tests/test_opt_optimisation_controller.py +++ b/pints/tests/test_opt_optimisation_controller.py @@ -26,6 +26,57 @@ class TestOptimisationController(unittest.TestCase): """ Called before every test """ np.random.seed(1) + def test_callback(self): + # Tests running with a callback method + + # Define callback that just stores the argument(s) it was called with + args = [] + + def cb(*arg): + args.append(arg) + + # Set up a controller + r = pints.toy.TwistedGaussianLogPDF(2, 0.01) + x0 = np.array([0, 1.01]) + s = 0.01 + opt = pints.OptimisationController(r, x0, s, method=method) + opt.set_log_to_screen(False) + opt.set_max_unchanged_iterations(None) + opt.set_max_iterations(10) + + # Pass in an invalid value + self.assertRaisesRegex( + ValueError, 'None or a callable', opt.set_callback, 3) + + # Now test using it correctly + opt.set_callback(None) + opt.set_callback(cb) + opt.run() + + # Ensure callback was called at each iteration + self.assertEqual(len(args), opt.iterations()) + + # Ensure first argument was iteration count + a = np.array([arg[0] for arg in args]) + self.assertTrue(np.all(a == np.arange(opt.iterations()))) + + # Ensure second argument was always the optimisation method + b = tuple(set([arg[1] for arg in args])) + self.assertEqual(len(b), 1) + self.assertIs(b[0], opt.optimiser()) + + # Check unsetting works + args.clear() + self.assertEqual(len(args), 0) + opt = pints.OptimisationController(r, x0, s, method=method) + opt.set_log_to_screen(False) + opt.set_max_unchanged_iterations(None) + opt.set_max_iterations(10) + opt.set_callback(cb) + opt.set_callback(None) + opt.run() + self.assertEqual(len(args), 0) + def test_optimise(self): # Tests :meth: `pints.optimise()`.
Replace maintainer+email with "devs" and public mailing list

https://github.com/pints-team/pints/blob/master/setup.py#L44-L45

Use "PINTS team" or "PINTS developers" and the public mailing list [email protected].
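The patch in this row also introduces the per-iteration callback from PR #1409. A minimal usage sketch, assuming a pints version that includes the new `set_callback` API, and using a toy log-pdf as in the row's tests:

```python
import pints
import pints.toy

log_pdf = pints.toy.TwistedGaussianLogPDF(2, 0.01)
opt = pints.OptimisationController(log_pdf, [0, 1.01], method=pints.CMAES)
opt.set_log_to_screen(False)
opt.set_max_iterations(20)

trace = []

def record(iteration, optimiser):
    # Called at the end of every iteration with (iteration, optimiser),
    # per the set_callback contract in the patch above.
    trace.append((iteration, optimiser.xbest()))

opt.set_callback(record)
opt.run()
print(len(trace))  # one entry per completed iteration
```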
0.0
d64eb05fec565f811b5cf2c8e50a81654b175ab1
[ "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_callback" ]
[ "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_deprecated_alias", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_exception_on_multi_use", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_logging", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_optimise", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_parallel", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_post_run_statistics", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_set_population_size", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_max_iterations", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_max_unchanged", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_no_criterion", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_stopping_threshold", "pints/tests/test_opt_optimisation_controller.py::TestOptimisationController::test_transform" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-08-24 11:31:08+00:00
bsd-3-clause
4,564
pints-team__pints-1444
diff --git a/True b/True new file mode 100644 index 00000000..844f2a6b --- /dev/null +++ b/True @@ -0,0 +1,4 @@ +Iter. Eval. Acceptance rate Time m:s +1 198 0.00505050505 0:00.1 +2 1029 0.0019436346 0:00.3 +3 3211 0.000934288384 0:01.0 diff --git a/docs/source/function_evaluation.rst b/docs/source/function_evaluation.rst index d02d0ad3..0e089b08 100644 --- a/docs/source/function_evaluation.rst +++ b/docs/source/function_evaluation.rst @@ -24,6 +24,7 @@ Overview: - :class:`Evaluator` - :class:`ParallelEvaluator` - :class:`SequentialEvaluator` +- :class:`MultiSequentialEvaluator` .. autofunction:: evaluate @@ -34,3 +35,4 @@ Overview: .. autoclass:: SequentialEvaluator +.. autoclass:: MultiSequentialEvaluator diff --git a/pints/__init__.py b/pints/__init__.py index 51b3e350..dbe8f045 100644 --- a/pints/__init__.py +++ b/pints/__init__.py @@ -156,6 +156,7 @@ from ._evaluation import ( Evaluator, ParallelEvaluator, SequentialEvaluator, + MultiSequentialEvaluator, ) diff --git a/pints/_evaluation.py b/pints/_evaluation.py index 51d267ee..cb194e20 100644 --- a/pints/_evaluation.py +++ b/pints/_evaluation.py @@ -413,6 +413,41 @@ multiprocessing.html#all-platforms>`_ for details). return errors +class MultiSequentialEvaluator(Evaluator): + """ + Evaluates a list of functions (or callable objects) for a list of input + values of the same length, and returns a list containing the calculated + function evaluations. + + Extends :class:`Evaluator`. + + Parameters + ---------- + functions : list of callable + The functions to evaluate. + args : sequence + An optional tuple containing extra arguments to each element in + functions, ``f``. If ``args`` is specified, ``f`` will be called as + ``f(x, *args)``. + """ + def __init__(self, functions, args=None): + super(MultiSequentialEvaluator, self).__init__(functions[0], args) + + # Check functions + for function in functions: + if not callable(function): + raise ValueError('The given functions must be callable.') + self._functions = functions + self._n_functions = len(functions) + + def _evaluate(self, positions): + if len(positions) != self._n_functions: + raise ValueError('Number of positions does not equal number of ' + 'functions.') + + return [f(x, *self._args) for f, x in zip(self._functions, positions)] + + class SequentialEvaluator(Evaluator): """ Evaluates a function (or callable object) for a list of input values, and @@ -522,4 +557,3 @@ class _Worker(multiprocessing.Process): except (Exception, KeyboardInterrupt, SystemExit): self._errors.put((self.pid, traceback.format_exc())) self._error.set() - diff --git a/pints/_mcmc/__init__.py b/pints/_mcmc/__init__.py index 62071cab..f6514036 100644 --- a/pints/_mcmc/__init__.py +++ b/pints/_mcmc/__init__.py @@ -274,7 +274,10 @@ class MCMCController(object): ---------- log_pdf : pints.LogPDF A :class:`LogPDF` function that evaluates points in the parameter - space. + space, or a list of :class:`LogPDF` of the same length as `chains`. If + multiple LogPDFs are provided, each chain will call only its + corresponding LogPDF. Note that if multiple LogPDFs are provided, + parallel running is not possible. chains : int The number of MCMC chains to generate. 
x0 @@ -301,16 +304,42 @@ class MCMCController(object): self, log_pdf, chains, x0, sigma0=None, transformation=None, method=None): - # Check function - if not isinstance(log_pdf, pints.LogPDF): - raise ValueError('Given function must extend pints.LogPDF') + if isinstance(log_pdf, pints.LogPDF): + self._multi_logpdf = False + + else: + self._multi_logpdf = True + try: + if len(log_pdf) != chains: + raise ValueError( + '`log_pdf` must either extend pints.LogPDF, ' + 'or be a list of objects which extend ' + 'pints.LogPDF of the same length as `chains`') + except TypeError: + raise TypeError('`log_pdf` must either extend pints.LogPDF, ' + 'or be a list of objects which extend ' + 'pints.LogPDF') + + first_n_params = log_pdf[0].n_parameters() + for pdf in log_pdf: + # Check function + if not isinstance(pdf, pints.LogPDF): + raise ValueError('Elements of `log_pdf` must extend ' + 'pints.LogPDF') + if pdf.n_parameters() != first_n_params: + raise ValueError('All log_pdfs must have the same number ' + 'of parameters.') # Apply a transformation (if given). From this point onward the MCMC # sampler will see only the transformed search space and will know # nothing about the model parameter space. if transformation is not None: # Convert log pdf - log_pdf = transformation.convert_log_pdf(log_pdf) + if self._multi_logpdf: + log_pdf = [transformation.convert_log_pdf(pdf) + for pdf in log_pdf] + else: + log_pdf = transformation.convert_log_pdf(log_pdf) # Convert initial positions x0 = [transformation.to_search(x) for x in x0] @@ -318,7 +347,10 @@ class MCMCController(object): # Convert sigma0, if provided if sigma0 is not None: sigma0 = np.asarray(sigma0) - n_parameters = log_pdf.n_parameters() + if not self._multi_logpdf: + n_parameters = log_pdf.n_parameters() + else: + n_parameters = log_pdf[0].n_parameters() # Make sure sigma0 is a (covariance) matrix if np.product(sigma0.shape) == n_parameters: # Convert from 1d array @@ -341,7 +373,10 @@ class MCMCController(object): self._log_pdf = log_pdf # Get number of parameters - self._n_parameters = self._log_pdf.n_parameters() + if not self._multi_logpdf: + self._n_parameters = self._log_pdf.n_parameters() + else: + self._n_parameters = self._log_pdf[0].n_parameters() # Check number of chains self._n_chains = int(chains) @@ -528,15 +563,24 @@ class MCMCController(object): # Choose method to evaluate f = self._log_pdf if self._needs_sensitivities: - f = f.evaluateS1 + if not self._multi_logpdf: + f = f.evaluateS1 + else: + f = [pdf.evaluateS1 for pdf in f] # Create evaluator object if self._parallel: - # Use at most n_workers workers - n_workers = min(self._n_workers, self._n_chains) - evaluator = pints.ParallelEvaluator(f, n_workers=n_workers) + if not self._multi_logpdf: + # Use at most n_workers workers + n_workers = min(self._n_workers, self._n_chains) + evaluator = pints.ParallelEvaluator(f, n_workers=n_workers) + else: + raise ValueError('Cannot run multiple logpdfs in parallel') else: - evaluator = pints.SequentialEvaluator(f) + if not self._multi_logpdf: + evaluator = pints.SequentialEvaluator(f) + else: + evaluator = pints.MultiSequentialEvaluator(f) # Initial phase if self._needs_initial_phase: @@ -1041,6 +1085,9 @@ class MCMCController(object): than 0. Parallelisation can be disabled by setting ``parallel`` to ``0`` or ``False``. + + Parallel evaluation is only supported when a single LogPDF has been + provided to the MCMC controller. """ if parallel is True: self._parallel = True
pints-team/pints
7380440f6e79eb4dc19fdbbba0b57fb6b61cb653
diff --git a/pints/tests/test_evaluators.py b/pints/tests/test_evaluators.py index 670823c9..b3fc7895 100755 --- a/pints/tests/test_evaluators.py +++ b/pints/tests/test_evaluators.py @@ -53,6 +53,43 @@ class TestEvaluators(unittest.TestCase): # Args must be a sequence self.assertRaises(ValueError, pints.SequentialEvaluator, f_args, 1) + def test_multi_sequential(self): + + # Create test data + xs = np.random.normal(0, 10, 100) + ys = [f(x) for x in xs] + + # Test sequential evaluator with multiple functions + e = pints.MultiSequentialEvaluator([f for _ in range(100)]) + self.assertTrue(np.all(ys == e.evaluate(xs))) + + # check errors + + # not iterable + with self.assertRaises(TypeError): + e = pints.MultiSequentialEvaluator(3) + + # not callable + with self.assertRaises(ValueError): + e = pints.MultiSequentialEvaluator([f, 4]) + + e = pints.MultiSequentialEvaluator([f for _ in range(100)]) + # Argument must be sequence + with self.assertRaises(ValueError): + e.evaluate(1) + + # wrong number of arguments + with self.assertRaises(ValueError): + e.evaluate([1 for _ in range(99)]) + + # Test args + e = pints.MultiSequentialEvaluator([f_args, f_args_plus1], [10, 20]) + self.assertEqual(e.evaluate([1, 1]), [31, 32]) + + # Args must be a sequence + self.assertRaises( + ValueError, pints.MultiSequentialEvaluator, [f_args], 1) + def test_parallel(self): # Create test data @@ -212,6 +249,10 @@ def f_args(x, y, z): return x + y + z +def f_args_plus1(x, y, z): + return x + y + z + 1 + + def ioerror_on_five(x): if x == 5: raise IOError diff --git a/pints/tests/test_mcmc_controller.py b/pints/tests/test_mcmc_controller.py index ceba9f04..fb03de62 100755 --- a/pints/tests/test_mcmc_controller.py +++ b/pints/tests/test_mcmc_controller.py @@ -11,7 +11,9 @@ import pints import pints.io import pints.toy import unittest +import unittest.mock import numpy as np +import numpy.testing as npt from shared import StreamCapture, TemporaryDirectory @@ -80,6 +82,17 @@ class TestMCMCController(unittest.TestCase): cls.log_posterior = pints.LogPosterior( cls.log_likelihood, cls.log_prior) + # Create another log-likelihood with two noise parameters + cls.log_likelihood_2 = pints.AR1LogLikelihood(problem) + cls.log_prior_2 = pints.UniformLogPrior( + [0.01, 400, 0.0, 0.0], + [0.02, 600, 100, 1] + ) + + # Create an un-normalised log-posterior (log-likelihood + log-prior) + cls.log_posterior_2 = pints.LogPosterior( + cls.log_likelihood_2, cls.log_prior_2) + def test_single(self): # Test with a SingleChainMCMC method. 
@@ -114,7 +127,7 @@ class TestMCMCController(unittest.TestCase): def f(x): return x self.assertRaisesRegex( - ValueError, 'extend pints.LogPDF', pints.MCMCController, + TypeError, 'extend pints.LogPDF', pints.MCMCController, f, n_chains, xs) # Test x0 and chain argument @@ -361,6 +374,100 @@ class TestMCMCController(unittest.TestCase): pints.MCMCController, self.log_posterior, n_chains, xs, sigma0, method=meth, transformation=logt) + def test_multi_logpdf(self): + # Test with multiple logpdfs + + # 2 chains + x0 = np.array(self.real_parameters) * 1.1 + x1 = np.array(self.real_parameters) * 1.15 + xs = [x0, x1] + + # Not iterable + with self.assertRaises(TypeError): + mcmc = pints.MCMCController(1, 3, xs) + + # Wrong number of logpdfs + with self.assertRaises(ValueError): + mcmc = pints.MCMCController( + [self.log_posterior, self.log_posterior], 3, xs) + + # List does not contain logpdfs + with self.assertRaises(ValueError): + mcmc = pints.MCMCController( + [self.log_posterior, 'abc'], 2, xs) + + # Pdfs have different numbers of n_parameters + with self.assertRaises(ValueError): + mcmc = pints.MCMCController( + [self.log_posterior, self.log_posterior_2], 2, xs) + + # Correctly configured inputs + n_chains = len(xs) + n_parameters = len(x0) + n_iterations = 10 + mcmc = pints.MCMCController( + [self.log_posterior, self.log_posterior], + n_chains, + xs, + transformation=pints.LogTransformation(n_parameters), + sigma0=[1, 0.1, 0.01]) + mcmc.set_max_iterations(n_iterations) + mcmc.set_log_to_screen(False) + chains = mcmc.run() + self.assertEqual(chains.shape[0], n_chains) + self.assertEqual(chains.shape[1], n_iterations) + self.assertEqual(chains.shape[2], n_parameters) + self.assertIs(chains, mcmc.chains()) + + # With sensitivities needed + mcmc = pints.MCMCController( + [self.log_posterior, self.log_posterior], + n_chains, + xs, + transformation=pints.LogTransformation(n_parameters), + sigma0=[1, 0.1, 0.01], + method=pints.HamiltonianMCMC) + mcmc.set_max_iterations(n_iterations) + mcmc.set_log_to_screen(False) + chains = mcmc.run() + self.assertEqual(chains.shape[0], n_chains) + self.assertEqual(chains.shape[1], n_iterations) + self.assertEqual(chains.shape[2], n_parameters) + self.assertIs(chains, mcmc.chains()) + + # Parallel (currently raises error) + mcmc = pints.MCMCController( + [self.log_posterior, self.log_posterior], + n_chains, + xs, + transformation=pints.LogTransformation(n_parameters), + sigma0=[1, 0.1, 0.01]) + mcmc.set_parallel(True) + mcmc.set_max_iterations(n_iterations) + mcmc.set_log_to_screen(False) + with self.assertRaises(ValueError): + chains = mcmc.run() + + # Test that both logpdfs are called + logpdf1 = unittest.mock.MagicMock( + return_value=-1.0, spec=self.log_posterior) + logpdf2 = unittest.mock.MagicMock( + return_value=-2.0, spec=self.log_posterior) + attrs = {'n_parameters.return_value': 3} + logpdf1.configure_mock(**attrs) + logpdf2.configure_mock(**attrs) + mcmc = pints.MCMCController([logpdf1, logpdf2], n_chains, xs) + mcmc.set_max_iterations(n_iterations) + mcmc.set_log_to_screen(False) + chains = mcmc.run() + + logpdf1.assert_called() + logpdf2.assert_called() + + # Check that they got called with the corresponding x0 at the start + npt.assert_allclose(logpdf1.call_args_list[0][0][0], xs[0]) + npt.assert_allclose(logpdf2.call_args_list[0][0][0], xs[1]) + def test_stopping(self): # Test different stopping criteria.
Optionally copy logpdfs before running MCMC

Currently, the MCMC controller calls `__call__` or `evaluateS1` on the same `log_pdf` object for all MCMC chains. This causes problems for more complex LogPDF objects with attributes that may be updated based on the parameters sent to the call method (in my case, the LogPDF object holds a grid of ODE solver time points which is updated adaptively as the chain moves through parameter space). It's easy enough to fix by making one copy.deepcopy of the log_pdf for each chain, and adding an appropriate pints.Evaluator so that each chain only calls its own log_pdf object. @MichaelClerx let me know if this makes any sense as an optional argument to the MCMC controller!
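A sketch of that idea, mirroring the `MultiSequentialEvaluator` added by the patch: one independent deep copy of a stateful log-pdf per chain, with each chain's position routed to its own copy:

```python
import copy

class MultiSequentialEvaluator:
    # Sketch: one callable per chain; position i goes only to callable i.
    def __init__(self, functions):
        self._functions = list(functions)

    def evaluate(self, positions):
        if len(positions) != len(self._functions):
            raise ValueError(
                'Number of positions does not equal number of functions.')
        return [f(x) for f, x in zip(self._functions, positions)]

def make_per_chain_evaluator(log_pdf, n_chains):
    # Each chain gets its own deep copy, so per-call state (e.g. an
    # adaptively refined ODE time grid) cannot leak between chains.
    return MultiSequentialEvaluator(
        [copy.deepcopy(log_pdf) for _ in range(n_chains)])
```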
0.0
7380440f6e79eb4dc19fdbbba0b57fb6b61cb653
[ "pints/tests/test_evaluators.py::TestEvaluators::test_multi_sequential" ]
[ "pints/tests/test_evaluators.py::TestEvaluators::test_function", "pints/tests/test_evaluators.py::TestEvaluators::test_parallel", "pints/tests/test_evaluators.py::TestEvaluators::test_parallel_random", "pints/tests/test_evaluators.py::TestEvaluators::test_sequential", "pints/tests/test_evaluators.py::TestEvaluators::test_worker", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_multiple_samplers_mixed_index_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_multiple_samplers_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_multiple_samplers_same_index_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_one_sampler_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerSingleChainStorage::test_one_sampler_with_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_multi_chain_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_multi_chain_with_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_single_chain_no_nones", "pints/tests/test_mcmc_controller.py::TestMCMCControllerMultiChainStorage::test_single_chain_with_nones" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-03-29 10:09:26+00:00
bsd-3-clause
4,565
pints-team__pints-1456
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d172892..56fb6b94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ All notable changes to this project will be documented in this file. ### Added - [#1459](https://github.com/pints-team/pints/pull/1459) Added the `iRprop-` local optimiser. +- [#1456](https://github.com/pints-team/pints/pull/1456) Added an optional `translation` to `ScalingTransform` and added a `UnitCubeTransformation` class. - [#1432](https://github.com/pints-team/pints/pull/1432) Added 2 new stochastic models: production and degradation model, Schlogl's system of chemical reactions. Moved the stochastic logistic model into `pints.stochastic` to take advantage of the `MarkovJumpModel`. - [#1420](https://github.com/pints-team/pints/pull/1420) The `Optimiser` class now distinguishes between a best-visited point (`x_best`, with score `f_best`) and a best-guessed point (`x_guessed`, with approximate score `f_guessed`). For most optimisers, the two values are equivalent. The `OptimisationController` still tracks `x_best` and `f_best` by default, but this can be modified using the methods `set_f_guessed_tracking` and `f_guessed_tracking`. - [#1417](https://github.com/pints-team/pints/pull/1417) Added a module `toy.stochastic` for stochastic models. In particular, `toy.stochastic.MarkovJumpModel` implements Gillespie's algorithm for easier future implementation of stochastic models. diff --git a/docs/source/transformations.rst b/docs/source/transformations.rst index 6c2ab145..38d9880e 100644 --- a/docs/source/transformations.rst +++ b/docs/source/transformations.rst @@ -44,6 +44,7 @@ Overview: - :class:`TransformedErrorMeasure` - :class:`TransformedLogPDF` - :class:`TransformedLogPrior` +- :class:`UnitCubeTransformation` .. autoclass:: ComposedTransformation @@ -67,3 +68,6 @@ Overview: .. autoclass:: TransformedLogPDF .. autoclass:: TransformedLogPrior + +.. autoclass:: UnitCubeTransformation + diff --git a/pints/__init__.py b/pints/__init__.py index 6b1c0211..e315c237 100644 --- a/pints/__init__.py +++ b/pints/__init__.py @@ -269,6 +269,7 @@ from ._transformation import ( TransformedErrorMeasure, TransformedLogPDF, TransformedLogPrior, + UnitCubeTransformation, ) diff --git a/pints/_transformation.py b/pints/_transformation.py index cf0da5a4..0fe44538 100644 --- a/pints/_transformation.py +++ b/pints/_transformation.py @@ -834,23 +834,32 @@ class RectangularBoundariesTransformation(Transformation): class ScalingTransformation(Transformation): """ - Scales the input parameters by multiplying with an array ``scalings``. + Scales the input parameters by multiplying with an array ``scalings`` and + adding an optional array ``translation``. The transformation from model parameters ``p`` to search parameters ``q`` is performed as:: - q = p * scalings + q = (p + translation) * scalings Its Jacobian matrix is a diagonal matrix with ``1 / scalings`` on the diagonal. Extends :class:`Transformation`. """ - def __init__(self, scalings): + def __init__(self, scalings, translation=None): self._s = pints.vector(scalings) self._inv_s = 1. / self._s self._n_parameters = len(self._s) + self._translation = None + if translation is not None: + self._translation = pints.vector(translation) + if len(self._translation) != self._n_parameters: + raise ValueError( + 'Translation must be None or be a vector of the same' + ' length as the scalings.') + def elementwise(self): """ See :meth:`Transformation.elementwise()`. 
""" return True @@ -878,12 +887,16 @@ class ScalingTransformation(Transformation): def to_model(self, q): """ See :meth:`Transformation.to_model()`. """ - q = pints.vector(q) - return self._inv_s * q + p = self._inv_s * pints.vector(q) + if self._translation is not None: + p -= self._translation + return p def to_search(self, p): """ See :meth:`Transformation.to_search()`. """ p = pints.vector(p) + if self._translation is not None: + p = p + self._translation return self._s * p @@ -1134,3 +1147,32 @@ class TransformedLogPrior(TransformedLogPDF, pints.LogPrior): for i, p in enumerate(ps): qs[i, :] = self._transform.to_search(p) return qs + + +class UnitCubeTransformation(ScalingTransformation): + """ + Maps a parameter space onto the unit (hyper)cube. + + Transformations from model parameters ``p`` to search parameters ``q`` are + made as:: + + q = (p - lower) / (upper - lower) + + Extends :class:`ScalingTransformation`. + """ + def __init__(self, lower, upper): + + # Check input + self._lower = pints.vector(lower) + self._upper = pints.vector(upper) + self._n_parameters = len(lower) + del lower, upper + + if len(self._upper) != self._n_parameters: + raise ValueError( + 'Lower and upper bounds must have the same length.') + if not np.all(self._upper > self._lower): + raise ValueError('Upper bounds must exceed lower bounds.') + + super().__init__(1 / (self._upper - self._lower), -self._lower) +
pints-team/pints
9acb2380dbd1b930a03de8e7e33885e7ab41fa3c
diff --git a/pints/tests/test_transformation.py b/pints/tests/test_transformation.py index 2166c4b0..d04ff5cf 100755 --- a/pints/tests/test_transformation.py +++ b/pints/tests/test_transformation.py @@ -594,7 +594,7 @@ class TestRectangularBoundariesTransformation(unittest.TestCase): class TestScalingTransformation(unittest.TestCase): - # Test ScalingTransformation class + """ Tests the ScalingTransformation class, without a translation. """ @classmethod def setUpClass(cls): @@ -653,6 +653,127 @@ class TestScalingTransformation(unittest.TestCase): self.assertTrue(self.t.elementwise()) +class TestScalingTransformationWithTranslation(unittest.TestCase): + """ Tests the ScalingTransformation class, with a translation. """ + + @classmethod + def setUpClass(cls): + # Create Transformation class + cls.p = np.array([-77, 0.333, 5, 66.66]) + cls.o = np.array([-100, 0, 5, 33.33]) + cls.s = np.array([-177, 0.333, 10., 99.99]) + cls.t = pints.ScalingTransformation(1 / cls.s, cls.o) + cls.x = [1., 1., 1., 1.] + cls.j = np.diag(cls.s) + cls.j_s1 = np.zeros((4, 4, 4)) + cls.log_j_det = 10.9841922175539395 + cls.log_j_det_s1 = np.zeros(4) + + def test_creation(self): + # Tests creation options (at the moment just errors) + pints.ScalingTransformation(1 / self.s, None) + pints.ScalingTransformation(1 / self.s, list(self.o)) + self.assertRaisesRegex( + ValueError, 'same length', + pints.ScalingTransformation, self.s, self.o[:-1]) + self.assertRaisesRegex( + ValueError, 'same length', + pints.ScalingTransformation, self.s, [1, 2, 3, 4, 5]) + + def test_to_search(self): + # Test forward transform + self.assertTrue(np.allclose(self.t.to_search(self.p), self.x)) + + def test_to_model(self): + # Test inverse transform + self.assertTrue(np.allclose(self.t.to_model(self.x), self.p)) + + def test_n_parameters(self): + # Test n_parameters + self.assertEqual(self.t.n_parameters(), 4) + + def test_jacobian(self): + # Test Jacobian + self.assertTrue(np.allclose(self.t.jacobian(self.x), self.j)) + + def test_jacobian_S1(self): + # Test Jacobian derivatives + calc_mat, calc_deriv = self.t.jacobian_S1(self.x) + self.assertTrue(np.allclose(calc_mat, self.j)) + self.assertTrue(np.allclose(calc_deriv, self.j_s1)) + + def test_log_jacobian_det(self): + # Test log-Jacobian determinant + self.assertEqual(self.t.log_jacobian_det(self.x), self.log_j_det) + + def test_log_jacobian_det_S1(self): + # Test log-Jacobian determinant derivatives + calc_val, calc_deriv = self.t.log_jacobian_det_S1(self.x) + self.assertEqual(calc_val, self.log_j_det) + self.assertTrue(np.all(np.equal(calc_deriv, self.log_j_det_s1))) + + def test_retransform(self): + # Test forward transform the inverse transform + self.assertTrue( + np.allclose(self.p, self.t.to_model(self.t.to_search(self.p)))) + self.assertTrue( + np.allclose(self.x, self.t.to_search(self.t.to_model(self.x)))) + + def test_elementwise(self): + # Test is elementwise + self.assertTrue(self.t.elementwise()) + + +class TestUnitCubeTransformation(unittest.TestCase): + """ + Tests the UnitCubeTransformation class. 
+ + Most methods are tested in the ScalingTransformation tests + """ + + @classmethod + def setUpClass(cls): + # Create Transformation class + cls.lower = np.array([-1, 2, -3]) + cls.upper = np.array([0, 4, -1]) + cls.t = pints.UnitCubeTransformation(cls.lower, cls.upper) + + def test_creation(self): + # Tests creation options (at the moment just errors) + pints.UnitCubeTransformation(self.lower, self.upper) + pints.UnitCubeTransformation(self.lower, [10, 10, 10]) + pints.UnitCubeTransformation((-10, -20, -30), self.upper) + + self.assertRaisesRegex( + ValueError, 'same length', + pints.UnitCubeTransformation, (1, 2), [3]) + self.assertRaisesRegex( + ValueError, 'same length', + pints.UnitCubeTransformation, [3, 4, 5], (10, 10, 10, 10)) + self.assertRaisesRegex( + ValueError, 'must exceed', + pints.UnitCubeTransformation, (1, 2), (0, 3)) + self.assertRaisesRegex( + ValueError, 'must exceed', + pints.UnitCubeTransformation, (1, 2), (3, 1)) + self.assertRaisesRegex( + ValueError, 'must exceed', + pints.UnitCubeTransformation, (1, 2), (1, 3)) + self.assertRaisesRegex( + ValueError, 'must exceed', + pints.UnitCubeTransformation, (1, 2), (3, 2)) + + def test_to_search(self): + # Test forward transform + self.assertTrue(np.allclose(self.t.to_search(self.lower), [0, 0, 0])) + self.assertTrue(np.allclose(self.t.to_search(self.upper), [1, 1, 1])) + + def test_to_model(self): + # Test inverse transform + self.assertTrue(np.allclose(self.t.to_model([0, 0, 0]), self.lower)) + self.assertTrue(np.allclose(self.t.to_model([1, 1, 1]), self.upper)) + + class TestTransformedWrappers(unittest.TestCase): def test_transformed_boundaries(self):
UnitCubeTransformation or ScalingTranslationTransformation I think some methods like having parameters that live on the unit cube, so we should maybe add a UnitCubeTransformation or a more general scaling & translation transformation? We've already got a `ScalingTransformation`, which could maybe be adapted to also have an `offset` constructor argument? There's also a `RectangularBoundariesTransformation` that maps from rectangular boundaries onto R^n. Perhaps this name should be changed, as there are other transformations you might want to do based on rectangular boundaries? Thoughts, @chonlei?
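For readers skimming the record, a minimal standalone sketch of the unit-cube mapping discussed above. These are plain NumPy functions, not the pints API; the `to_search`/`to_model` names mirror the patch, but the helpers themselves are illustrative:

```python
import numpy as np

def to_search(p, lower, upper):
    """Map model parameters p onto the unit cube: q = (p - lower) / (upper - lower)."""
    p, lower, upper = map(np.asarray, (p, lower, upper))
    return (p - lower) / (upper - lower)

def to_model(q, lower, upper):
    """Invert the map: p = lower + q * (upper - lower)."""
    q, lower, upper = map(np.asarray, (q, lower, upper))
    return lower + q * (upper - lower)

lower, upper = np.array([-1.0, 2.0, -3.0]), np.array([0.0, 4.0, -1.0])
assert np.allclose(to_search(lower, lower, upper), 0.0)   # lower bound maps to 0
assert np.allclose(to_search(upper, lower, upper), 1.0)   # upper bound maps to 1
p = np.array([-0.5, 3.0, -2.0])
assert np.allclose(to_model(to_search(p, lower, upper), lower, upper), p)
```

This is exactly the composition of a translation by `-lower` followed by a scaling by `1 / (upper - lower)`, which is why the patch implements `UnitCubeTransformation` as a thin subclass of `ScalingTransformation`.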
0.0
9acb2380dbd1b930a03de8e7e33885e7ab41fa3c
[ "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_creation", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_elementwise", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_jacobian", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_jacobian_S1", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_n_parameters", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_retransform", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_to_model", "pints/tests/test_transformation.py::TestScalingTransformationWithTranslation::test_to_search", "pints/tests/test_transformation.py::TestUnitCubeTransformation::test_creation", "pints/tests/test_transformation.py::TestUnitCubeTransformation::test_to_model", "pints/tests/test_transformation.py::TestUnitCubeTransformation::test_to_search" ]
[ "pints/tests/test_transformation.py::TestAbstractClassTransformation::test_convert_std_and_cov", "pints/tests/test_transformation.py::TestAbstractClassTransformation::test_jacobian", "pints/tests/test_transformation.py::TestAbstractClassTransformation::test_jacobian_S1", "pints/tests/test_transformation.py::TestAbstractClassTransformation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestAbstractClassTransformation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestAbstractClassTransformation::test_n_parameters", "pints/tests/test_transformation.py::TestAbstractClassTransformation::test_to_model", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_bad_constructor", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_elementwise", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_jacobian", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_jacobian_S1", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_log_jacobian_det", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_n_parameters", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_retransform", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_to_model", "pints/tests/test_transformation.py::TestComposedTransformationElementWise::test_to_search", "pints/tests/test_transformation.py::TestComposedTransformation::test_against_elementwise_transformation", "pints/tests/test_transformation.py::TestComposedTransformation::test_bad_constructor", "pints/tests/test_transformation.py::TestComposedTransformation::test_elementwise", "pints/tests/test_transformation.py::TestComposedTransformation::test_jacobian", "pints/tests/test_transformation.py::TestComposedTransformation::test_jacobian_S1", "pints/tests/test_transformation.py::TestComposedTransformation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestComposedTransformation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestComposedTransformation::test_n_parameters", "pints/tests/test_transformation.py::TestComposedTransformation::test_retransform", "pints/tests/test_transformation.py::TestComposedTransformation::test_to_model", "pints/tests/test_transformation.py::TestComposedTransformation::test_to_search", "pints/tests/test_transformation.py::TestIdentityTransformation::test_elementwise", "pints/tests/test_transformation.py::TestIdentityTransformation::test_jacobian", "pints/tests/test_transformation.py::TestIdentityTransformation::test_jacobian_S1", "pints/tests/test_transformation.py::TestIdentityTransformation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestIdentityTransformation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestIdentityTransformation::test_n_parameters", "pints/tests/test_transformation.py::TestIdentityTransformation::test_retransform", "pints/tests/test_transformation.py::TestIdentityTransformation::test_to_model", "pints/tests/test_transformation.py::TestIdentityTransformation::test_to_search", "pints/tests/test_transformation.py::TestLogitTransformation::test_elementwise", "pints/tests/test_transformation.py::TestLogitTransformation::test_invalid_inputs", 
"pints/tests/test_transformation.py::TestLogitTransformation::test_jacobian", "pints/tests/test_transformation.py::TestLogitTransformation::test_jacobian_S1", "pints/tests/test_transformation.py::TestLogitTransformation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestLogitTransformation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestLogitTransformation::test_n_parameters", "pints/tests/test_transformation.py::TestLogitTransformation::test_retransform", "pints/tests/test_transformation.py::TestLogitTransformation::test_to_model", "pints/tests/test_transformation.py::TestLogitTransformation::test_to_search", "pints/tests/test_transformation.py::TestLogTransformation::test_elementwise", "pints/tests/test_transformation.py::TestLogTransformation::test_invalid_inputs", "pints/tests/test_transformation.py::TestLogTransformation::test_jacobian", "pints/tests/test_transformation.py::TestLogTransformation::test_jacobian_S1", "pints/tests/test_transformation.py::TestLogTransformation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestLogTransformation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestLogTransformation::test_n_parameters", "pints/tests/test_transformation.py::TestLogTransformation::test_retransform", "pints/tests/test_transformation.py::TestLogTransformation::test_to_model", "pints/tests/test_transformation.py::TestLogTransformation::test_to_search", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_bad_constructor", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_elementwise", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_jacobian", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_jacobian_S1", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_n_parameters", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_retransform", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_to_model", "pints/tests/test_transformation.py::TestRectangularBoundariesTransformation::test_to_search", "pints/tests/test_transformation.py::TestScalingTransformation::test_elementwise", "pints/tests/test_transformation.py::TestScalingTransformation::test_jacobian", "pints/tests/test_transformation.py::TestScalingTransformation::test_jacobian_S1", "pints/tests/test_transformation.py::TestScalingTransformation::test_log_jacobian_det", "pints/tests/test_transformation.py::TestScalingTransformation::test_log_jacobian_det_S1", "pints/tests/test_transformation.py::TestScalingTransformation::test_n_parameters", "pints/tests/test_transformation.py::TestScalingTransformation::test_retransform", "pints/tests/test_transformation.py::TestScalingTransformation::test_to_model", "pints/tests/test_transformation.py::TestScalingTransformation::test_to_search", "pints/tests/test_transformation.py::TestTransformedWrappers::test_transformed_boundaries", "pints/tests/test_transformation.py::TestTransformedWrappers::test_transformed_error_measure", "pints/tests/test_transformation.py::TestTransformedWrappers::test_transformed_log_pdf" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-08-15 11:00:51+00:00
bsd-3-clause
4,566
pints-team__pints-1462
diff --git a/CHANGELOG.md b/CHANGELOG.md index e79ec596..7b1b6208 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ All notable changes to this project will be documented in this file. ## Unreleased ### Added +- [#1462](https://github.com/pints-team/pints/pull/1461) The `OptimisationController` now has a stopping criterion `max_evaluations`. - [#1460](https://github.com/pints-team/pints/pull/1460) Added the `Adam` local optimiser. - [#1459](https://github.com/pints-team/pints/pull/1459) Added the `iRprop-` local optimiser. - [#1456](https://github.com/pints-team/pints/pull/1456) Added an optional `translation` to `ScalingTransform` and added a `UnitCubeTransformation` class. diff --git a/pints/_optimisers/__init__.py b/pints/_optimisers/__init__.py index 2c99a600..7d93b029 100644 --- a/pints/_optimisers/__init__.py +++ b/pints/_optimisers/__init__.py @@ -455,6 +455,9 @@ class OptimisationController(object): self._unchanged_threshold = 1 # smallest significant f change self.set_max_unchanged_iterations() + # Maximum evaluations + self._max_evaluations = None + # Threshold value self._threshold = None @@ -470,6 +473,13 @@ class OptimisationController(object): """ return self._evaluations + def max_evaluations(self): + """ + Returns the maximum number of evaluations if this stopping criteria is + set, or ``None`` if it is not. See :meth:`set_max_evaluations`. + """ + return self._max_evaluations + def f_guessed_tracking(self): """ Returns ``True`` if the controller is set to track the optimiser @@ -539,6 +549,7 @@ class OptimisationController(object): has_stopping_criterion = False has_stopping_criterion |= (self._max_iterations is not None) has_stopping_criterion |= (self._unchanged_max_iterations is not None) + has_stopping_criterion |= (self._max_evaluations is not None) has_stopping_criterion |= (self._threshold is not None) if not has_stopping_criterion: raise ValueError('At least one stopping criterion must be set.') @@ -617,7 +628,8 @@ class OptimisationController(object): # Add fields to log max_iter_guess = max(self._max_iterations or 0, 10000) - max_eval_guess = max_iter_guess * pop_size + max_eval_guess = max( + self._max_evaluations or 0, max_iter_guess * pop_size) logger.add_counter('Iter.', max_value=max_iter_guess) logger.add_counter('Eval.', max_value=max_eval_guess) logger.add_float('Best') @@ -691,6 +703,14 @@ class OptimisationController(object): halt_message = ('No significant change for ' + str(unchanged_iterations) + ' iterations.') + # Maximum number of evaluations + if (self._max_evaluations is not None and + evaluations >= self._max_evaluations): + running = False + halt_message = ( + 'Maximum number of evaluations (' + + str(self._max_evaluations) + ') reached.') + # Threshold value halt = (self._threshold is not None and f_new < self._threshold) @@ -827,6 +847,21 @@ class OptimisationController(object): """ self._log_to_screen = True if enabled else False + def set_max_evaluations(self, evaluations=None): + """ + Adds a stopping criterion, allowing the routine to halt after the + given number of ``evaluations``. + + This criterion is disabled by default. To enable, pass in any positive + integer. To disable again, use ``set_max_evaluations(None)``. 
+ """ + if evaluations is not None: + evaluations = int(evaluations) + if evaluations < 0: + raise ValueError( + 'Maximum number of evaluations cannot be negative.') + self._max_evaluations = evaluations + def set_max_iterations(self, iterations=10000): """ Adds a stopping criterion, allowing the routine to halt after the
pints-team/pints
f75911eaf1cb861624d9a4dd2829596871b1a2a1
diff --git a/pints/tests/test_opt_optimisation_controller.py b/pints/tests/test_opt_controller.py similarity index 95% rename from pints/tests/test_opt_optimisation_controller.py rename to pints/tests/test_opt_controller.py index 8d8d3151..d9b06cc5 100755 --- a/pints/tests/test_opt_optimisation_controller.py +++ b/pints/tests/test_opt_controller.py @@ -193,6 +193,23 @@ class TestOptimisationController(unittest.TestCase): self.assertEqual(x.shape, (2, )) self.assertTrue(b.check(x)) + def test_stopping_max_evaluations(self): + # Runs an optimisation with the max_fevals stopping criterion. + + r = pints.toy.TwistedGaussianLogPDF(2, 0.01) + x = np.array([0, 1.01]) + b = pints.RectangularBoundaries([-0.01, 0.95], [0.01, 1.05]) + s = 0.01 + opt = pints.OptimisationController(r, x, s, b, method=method) + opt.set_log_to_screen(True) + opt.set_max_unchanged_iterations(None) + opt.set_max_evaluations(10) + self.assertEqual(opt.max_evaluations(), 10) + self.assertRaises(ValueError, opt.set_max_evaluations, -1) + with StreamCapture() as c: + opt.run() + self.assertIn('Halting: Maximum number of evaluations', c.text()) + def test_stopping_max_iterations(self): # Runs an optimisation with the max_iter stopping criterion.
Add `max_evaluations` stopping criterion to OptimisationController When comparing optimisers, or trying to write some hybrid/combined method, the concept of an "evaluation budget" is quite common, and can be easily implemented if we set a `max_evaluations` stopping criterion
0.0
f75911eaf1cb861624d9a4dd2829596871b1a2a1
[ "pints/tests/test_opt_controller.py::TestOptimisationController::test_stopping_max_evaluations" ]
[ "pints/tests/test_opt_controller.py::TestOptimisationController::test_best_vs_guessed", "pints/tests/test_opt_controller.py::TestOptimisationController::test_callback", "pints/tests/test_opt_controller.py::TestOptimisationController::test_deprecated_alias", "pints/tests/test_opt_controller.py::TestOptimisationController::test_exception_on_multi_use", "pints/tests/test_opt_controller.py::TestOptimisationController::test_logging", "pints/tests/test_opt_controller.py::TestOptimisationController::test_optimise", "pints/tests/test_opt_controller.py::TestOptimisationController::test_parallel", "pints/tests/test_opt_controller.py::TestOptimisationController::test_post_run_statistics", "pints/tests/test_opt_controller.py::TestOptimisationController::test_set_population_size", "pints/tests/test_opt_controller.py::TestOptimisationController::test_stopping_max_iterations", "pints/tests/test_opt_controller.py::TestOptimisationController::test_stopping_max_unchanged", "pints/tests/test_opt_controller.py::TestOptimisationController::test_stopping_no_criterion", "pints/tests/test_opt_controller.py::TestOptimisationController::test_stopping_threshold", "pints/tests/test_opt_controller.py::TestOptimisationController::test_transform" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-08-17 13:26:26+00:00
bsd-3-clause
4,567
pints-team__pints-1499
diff --git a/README.md b/README.md index 9c8b3bf0..69b30e83 100644 --- a/README.md +++ b/README.md @@ -76,8 +76,11 @@ To see what's changed in the latest release, see the [CHANGELOG](https://github. ## Contributing to PINTS -If you'd like to help us develop PINTS by adding new methods, writing documentation, or fixing embarassing bugs, please have a look at these [guidelines](https://github.com/pints-team/pints/blob/main/CONTRIBUTING.md) first. +There are lots of ways to contribute to PINTS development, and anyone is free to join in! +For example, you can report problems or make feature requests on the [issues](https://github.com/pints-team/pints/issues) pages. +Similarly, if you want to contribute documentation or code you can tell us your idea on this page, and then provide a pull request for review. +Because PINTS is a big project, we've written extensive [contribution guidelines](https://github.com/pints-team/pints/blob/master/CONTRIBUTING.md) to help standardise the code -- but don't worry, this will become clear during review. ## License diff --git a/docs/source/log_priors.rst b/docs/source/log_priors.rst index 03a5291c..65c1bb0c 100644 --- a/docs/source/log_priors.rst +++ b/docs/source/log_priors.rst @@ -23,6 +23,7 @@ Overview: - :class:`HalfCauchyLogPrior` - :class:`InverseGammaLogPrior` - :class:`LogNormalLogPrior` +- :class:`LogUniformLogPrior` - :class:`MultivariateGaussianLogPrior` - :class:`NormalLogPrior` - :class:`StudentTLogPrior` @@ -48,6 +49,8 @@ Overview: .. autoclass:: LogNormalLogPrior +.. autoclass:: LogUniformLogPrior + .. autoclass:: MultivariateGaussianLogPrior .. autoclass:: NormalLogPrior diff --git a/pints/__init__.py b/pints/__init__.py index e6448e64..1c1591d5 100644 --- a/pints/__init__.py +++ b/pints/__init__.py @@ -100,6 +100,7 @@ from ._log_priors import ( HalfCauchyLogPrior, InverseGammaLogPrior, LogNormalLogPrior, + LogUniformLogPrior, MultivariateGaussianLogPrior, NormalLogPrior, StudentTLogPrior, diff --git a/pints/_log_priors.py b/pints/_log_priors.py index eb669ab9..77edbac6 100644 --- a/pints/_log_priors.py +++ b/pints/_log_priors.py @@ -748,6 +748,74 @@ class LogNormalLogPrior(pints.LogPrior): s=self._scale, size=(n, 1)) +class LogUniformLogPrior(pints.LogPrior): + r""" + Defines a log-uniform prior over a given range. + + The range includes the lower and upper boundaries, so that any + point ``x`` with a non-zero prior must have ``0 < a <= x < b``. + + In 1D this has pdf + + .. math:: + f(x|a,b)=\begin{cases}0,&\text{if }x\not\in + [a,b]\\\frac{1}{x \log(\frac{b}{a})} + ,&\text{if }x\in[a,b]\end{cases}. + + A random variable :math:`X` distributed according to this pdf has + expectation + + .. math:: + \mathrm{E}(X)=\frac{b-a}{\log(b/a)}. + + For example, to create a prior with :math:`x\in[1e-2,1e2]`, use:: + + p = pints.LogUniformLogPrior(1e-2, 1e2) + + Extends :class:`LogPrior`. + """ + def __init__(self, a, b): + if a <= 0: + raise ValueError("a must be > 0") + if b <= a: + raise ValueError("b must be > a > 0") + + self._a = a + self._b = b + #constant for S1 evaluation + self._c = np.divide(1, np.log(np.divide(b, a))) + + def __call__(self, x): + return scipy.stats.loguniform.logpdf(x, self._a, self._b) + + def cdf(self, x): + """ See :meth:`LogPrior.cdf()`. """ + return scipy.stats.loguniform.cdf(x, self._a, self._b) + + def icdf(self, p): + """ See :meth:`LogPrior.icdf()`. """ + return scipy.stats.loguniform.ppf(p, self._a, self._b) + + def evaluateS1(self, x): + """ See :meth:`LogPrior.evaluateS1()`. 
""" + dp = np.array(- 1 / x) + # Set values outside limits to nan + dp[(np.asarray(x) < self._a) | (np.asarray(x) > self._b)] = np.nan + return self(x), dp + + def mean(self): + """ See :meth:`LogPrior.mean()`. """ + return scipy.stats.loguniform.mean(self._a, self._b) + + def n_parameters(self): + """ See :meth:`LogPrior.n_parameters()`. """ + return 1 + + def sample(self, n=1): + """ See :meth:`LogPrior.sample()`. """ + return scipy.stats.loguniform.rvs(self._a, self._b, size=(n, 1)) + + class MultivariateGaussianLogPrior(pints.LogPrior): r""" Defines a multivariate Gaussian (log) prior with a given ``mean`` and
pints-team/pints
4323e8b25a8874799cf4086e6d3cf1a1bcbf0bac
diff --git a/pints/tests/test_log_priors.py b/pints/tests/test_log_priors.py index 499ac341..23949fd2 100755 --- a/pints/tests/test_log_priors.py +++ b/pints/tests/test_log_priors.py @@ -595,6 +595,34 @@ class TestPrior(unittest.TestCase): mean = np.mean(samples1).item() self.assertTrue(9. < mean < 11.) + def test_log_uniform_prior(self): + + #Test input parameters + self.assertRaises(ValueError, pints.LogUniformLogPrior, 0, 1) + self.assertRaises(ValueError, pints.LogUniformLogPrior, 1, 1) + + a = 1e-2 + b = 1e2 + + p = pints.LogUniformLogPrior(a, b) + + #all values below were calculated separately (not by scipy) + self.assertAlmostEqual(p.mean(), 10.856276311376536) + + #test n_parameters + self.assertEqual(p.n_parameters(), 1) + + points = [0.1, 63.0] + vals = [0.08225828662619909, -6.36346153275938] + dvals = [-10.0, -0.015873015873015872] + + for point, val, dval in zip(points, vals, dvals): + test_val_1, test_dval = p.evaluateS1(point) + test_val_2 = p(point) + self.assertEqual(test_val_1, test_val_2) + self.assertAlmostEqual(test_val_1, val) + self.assertAlmostEqual(test_dval, dval) + def test_log_normal_prior(self): # Test input parameters @@ -657,6 +685,21 @@ class TestPrior(unittest.TestCase): self.assertAlmostEqual(pints_val, scipy_val) self.assertAlmostEqual(pints_deriv[0], hand_calc_deriv) + def test_log_uniform_prior_cdf_icdf(self): + p1 = pints.LogUniformLogPrior(1e-2, 1e2) + self.assertAlmostEqual(p1.cdf(0.1), 0.25) + self.assertAlmostEqual(p1.cdf(10), 0.75) + self.assertAlmostEqual(p1.icdf(0.25), 0.1) + self.assertAlmostEqual(p1.icdf(0.75), 10.0) + + def test_log_uniform_prior_sampling(self): + p1 = pints.LogUniformLogPrior(1e-2, 1e2) + samples = p1.sample(1000000) + mean = p1.mean() + sample_mean = np.mean(samples) + self.assertEqual(len(samples), 1000000) + self.assertLessEqual(np.abs(sample_mean - mean), 0.1) + def test_log_normal_prior_cdf_icdf(self): p1 = pints.LogNormalLogPrior(-3.5, 7.7) self.assertAlmostEqual(p1.cdf(1.1), 0.6797226585187124)
Add reciprocal distribution prior (LogUniformLogPrior) to help estimate parameters that vary over multiple orders of magnitude Hi, thanks for a great package! It would be great to have a [reciprocal/log uniform](https://en.wikipedia.org/wiki/Reciprocal_distribution) prior. It should be straightforward since it's implemented in `scipy.stats`, and I would be happy to implement it.
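A quick sketch of the `scipy.stats.loguniform` calls the patch builds on; the closed-form checks correspond to the pdf and mean formulas in the docstring above:

```python
import numpy as np
from scipy import stats

a, b = 1e-2, 1e2  # support spanning four orders of magnitude

# Log density at x = 0.1; the hand-rolled pdf is f(x) = 1 / (x * log(b / a)) on [a, b]
logpdf = stats.loguniform.logpdf(0.1, a, b)
assert np.isclose(logpdf, -np.log(0.1 * np.log(b / a)))

# Sampling: the sample mean should approach E(X) = (b - a) / log(b / a)
samples = stats.loguniform.rvs(a, b, size=100_000, random_state=0)
assert np.isclose(samples.mean(), (b - a) / np.log(b / a), rtol=0.05)
```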
0.0
4323e8b25a8874799cf4086e6d3cf1a1bcbf0bac
[ "pints/tests/test_log_priors.py::TestPrior::test_log_uniform_prior", "pints/tests/test_log_priors.py::TestPrior::test_log_uniform_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_log_uniform_prior_sampling" ]
[ "pints/tests/test_log_priors.py::TestPrior::test_beta_prior", "pints/tests/test_log_priors.py::TestPrior::test_beta_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_cauchy_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_cauchy_prior", "pints/tests/test_log_priors.py::TestPrior::test_cauchy_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_composed_prior", "pints/tests/test_log_priors.py::TestPrior::test_composed_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_composed_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_exponential_prior", "pints/tests/test_log_priors.py::TestPrior::test_exponential_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_exponential_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_gamma_prior", "pints/tests/test_log_priors.py::TestPrior::test_gamma_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_gamma_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_gaussian_prior", "pints/tests/test_log_priors.py::TestPrior::test_gaussian_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_gaussian_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_half_cauchy_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_half_cauchy_prior", "pints/tests/test_log_priors.py::TestPrior::test_half_cauchy_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_inverse_gamma_prior", "pints/tests/test_log_priors.py::TestPrior::test_inverse_gamma_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_inverse_gamma_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_log_normal_prior", "pints/tests/test_log_priors.py::TestPrior::test_log_normal_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_log_normal_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_multivariate_normal_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_multivariate_normal_prior", "pints/tests/test_log_priors.py::TestPrior::test_multivariate_normal_sampling", "pints/tests/test_log_priors.py::TestPrior::test_student_t_prior", "pints/tests/test_log_priors.py::TestPrior::test_student_t_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_student_t_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_truncated_gaussian_prior", "pints/tests/test_log_priors.py::TestPrior::test_truncated_gaussian_prior_cdf_icdf", "pints/tests/test_log_priors.py::TestPrior::test_truncated_gaussian_prior_sampling", "pints/tests/test_log_priors.py::TestPrior::test_uniform_prior", "pints/tests/test_log_priors.py::TestPrior::test_uniform_prior_cdf", "pints/tests/test_log_priors.py::TestPrior::test_uniform_prior_icdf", "pints/tests/test_log_priors.py::TestPrior::test_uniform_prior_sampling" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2023-10-18 19:51:46+00:00
bsd-3-clause
4,568
pior__pyramid_useragent-3
diff --git a/pyramid_useragent/__init__.py b/pyramid_useragent/__init__.py index a06e6d0..c6a71ac 100644 --- a/pyramid_useragent/__init__.py +++ b/pyramid_useragent/__init__.py @@ -27,7 +27,7 @@ def get_user_agent_parsed(request): return UserAgent(request.user_agent) def get_user_agent_classified(request): - return UserAgentClassifier(request.user_agent) + return UserAgentClassifier(request.user_agent or '') class UserAgentComponent(object):
pior/pyramid_useragent
ca27e28f2b19a2c68a8c3a4a555ee0420b0c382b
diff --git a/pyramid_useragent/tests.py b/pyramid_useragent/tests.py index 1bb7104..4c771a5 100644 --- a/pyramid_useragent/tests.py +++ b/pyramid_useragent/tests.py @@ -28,6 +28,15 @@ class TestPyramidUserAgent(unittest.TestCase): self.assertIsInstance(resp, UserAgentClassifier) self.assertTrue(resp.is_mobile) + def test_no_user_agent(self): + from pyramid_useragent import (get_user_agent_classified, UserAgentClassifier) + + request = mock.Mock() + request.user_agent = None + + resp = get_user_agent_classified(request) + self.assertIsInstance(resp, UserAgentClassifier) + def test_safety(self): from pyramid_useragent import UserAgent
Fails if request.user_agent is None Anonymous report on Bitbucket: > In my unit tests using WebTest, request.user_agent comes through as None. This is probably the behavior of pyramid when the User-Agent header is not provided. https://bitbucket.org/pior/pyramid_useragent/issues/3/fails-if-requestuser_agent-is-none
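A self-contained sketch of the guard in the one-line fix above. `UserAgentClassifier` here is a hypothetical stand-in for the real class; only the `or ''` coalescing is the point:

```python
class UserAgentClassifier:
    """Stand-in for the real classifier; parsing None would raise TypeError."""
    def __init__(self, ua_string):
        self.is_mobile = 'Mobi' in ua_string

def get_user_agent_classified(request):
    # Coalesce the missing header to an empty string before parsing.
    return UserAgentClassifier(request.user_agent or '')

class FakeRequest:
    user_agent = None  # what WebTest produces when no User-Agent header is sent

assert get_user_agent_classified(FakeRequest()).is_mobile is False
```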
0.0
ca27e28f2b19a2c68a8c3a4a555ee0420b0c382b
[ "pyramid_useragent/tests.py::TestPyramidUserAgent::test_no_user_agent" ]
[ "pyramid_useragent/tests.py::TestPyramidUserAgent::test_components_comment", "pyramid_useragent/tests.py::TestPyramidUserAgent::test_get_user_agent_classified", "pyramid_useragent/tests.py::TestPyramidUserAgent::test_get_user_agent_parsed", "pyramid_useragent/tests.py::TestPyramidUserAgent::test_safety" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2018-01-26 15:45:36+00:00
bsd-4-clause
4,569
piotrmaslanka__falcon-ratelimit-5
diff --git a/.travis.yml b/.travis.yml index 6aeb9d4..f6c244e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,15 +1,15 @@ language: python python: - - "2.7" - "3.5" - "3.4" - "pypy" cache: pip +services: + - redis-server install: - pip install -r requirements.txt - pip install --force-reinstall "coverage>=4.0,<4.4" codeclimate-test-reporter script: - - sudo redis-server /etc/redis/redis.conf --port 6379 - python setup.py nosetests --with-coverage after_success: - codeclimate-test-reporter diff --git a/falconratelimit/ratelimit.py b/falconratelimit/ratelimit.py index 6e8641d..c912a39 100644 --- a/falconratelimit/ratelimit.py +++ b/falconratelimit/ratelimit.py @@ -89,7 +89,7 @@ def rate_limit(per_second=30, resource=u'default', window_size=10, redis_url=None): arg = Argument(resource, window_size, per_second, error_message, redis_url) - def hook(req, resp, params): + def hook(req, resp, resource, params): if redis_url: try: redis
piotrmaslanka/falcon-ratelimit
0f0573259c67fe01f673c99e8c29a8c4a7225690
diff --git a/tests/test_ratelimit.py b/tests/test_ratelimit.py index fda9683..b2f2078 100644 --- a/tests/test_ratelimit.py +++ b/tests/test_ratelimit.py @@ -23,7 +23,7 @@ import redis class RedisResource(object): @falcon.before( - rate_limit(redis_url='localhost:6379', per_second=1, window_size=5, + rate_limit(redis_url='redis://localhost:6379/0', per_second=1, window_size=5, resource='on_get')) def on_get(self, req, resp): resp.status = falcon.HTTP_200
falcon-ratelimit 1.1 is broken with falcon 2.0.0 FYI, looks like `falcon-ratelimit==1.1` is broken with the latest version of falcon, which is `falcon==2.0.0` ``` ❯ gunicorn hello.app:api [2019-05-24 14:10:19 -0300] [16138] [INFO] Starting gunicorn 19.9.0 [2019-05-24 14:10:19 -0300] [16138] [INFO] Listening at: http://127.0.0.1:8000 (16138) [2019-05-24 14:10:19 -0300] [16138] [INFO] Using worker: sync [2019-05-24 14:10:19 -0300] [16141] [INFO] Booting worker with pid: 16141 [2019-05-24 14:10:22 -0300] [16141] [ERROR] Error handling request /ping Traceback (most recent call last): File "/home/viniarck/repos/hello_api/.direnv/python-3.5.5/lib/python3.5/site-packages/gunicorn/workers/sync.py", line 135, in handle self.handle_request(listener, req, client, addr) File "/home/viniarck/repos/hello_api/.direnv/python-3.5.5/lib/python3.5/site-packages/gunicorn/workers/sync.py", line 176, in handle_request respiter = self.wsgi(environ, resp.start_response) File "falcon/api.py", line 274, in falcon.api.API.__call__ File "falcon/api.py", line 269, in falcon.api.API.__call__ File "/home/viniarck/repos/hello_api/.direnv/python-3.5.5/lib/python3.5/site-packages/falcon/hooks.py", line 179, in do_before action(req, resp, self, kwargs, *action_args, **action_kwargs) TypeError: hook() takes 3 positional arguments but 4 were given ``` Code sample to reproduce: ``` import falcon from falconratelimit import rate_limit class PingResource(object): @falcon.before(rate_limit(per_second=5, window_size=30)) def on_get(self, req, resp) -> None: resp.media = {"test": "hello"} api = falcon.API() api.add_route("/ping", PingResource()) ``` Testing the endpoint: ``` ❯ http GET :8000/ping HTTP/1.1 500 Internal Server Error Connection: close Content-Length: 141 Content-Type: text/html <html> <head> <title>Internal Server Error</title> </head> <body> <h1><p>Internal Server Error</p></h1> </body> </html> ``` This same example works fine with falcon 1.4.1. Thanks!
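To make the signature change concrete, here is a dependency-free sketch of how falcon 2.x calls `before` hooks with four positional arguments (`req`, `resp`, `resource`, `params`), and why the old three-argument hook raises the `TypeError` shown in the traceback. The objects below are stand-ins, not real falcon types:

```python
def rate_limit_hook(req, resp, resource, params):
    """Falcon 2.x invokes before-hooks as hook(req, resp, resource, params)."""
    req.setdefault('hits', 0)
    req['hits'] += 1

# Simulate the dispatcher's call in falcon 2.x:
req, resp, resource, params = {}, object(), object(), {}
rate_limit_hook(req, resp, resource, params)   # four positional args -> OK

def old_hook(req, resp, params):               # the falcon 1.x three-argument shape
    pass

try:
    old_hook(req, resp, resource, params)      # what falcon 2.x actually passes
except TypeError as e:
    print(e)  # "old_hook() takes 3 positional arguments but 4 were given"
```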
0.0
0f0573259c67fe01f673c99e8c29a8c4a7225690
[ "tests/test_ratelimit.py::TestRatelimit::test_limit_ok" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2020-04-13 18:57:13+00:00
mit
4,570
piotrmaslanka__satella-11
diff --git a/satella/coding/recast_exceptions.py b/satella/coding/recast_exceptions.py index bf164db4..32e4e1c8 100644 --- a/satella/coding/recast_exceptions.py +++ b/satella/coding/recast_exceptions.py @@ -45,7 +45,8 @@ class rethrow_as(object): """ # You can also provide just two exceptions - if len(pairs) == 2 and all(issubclass(p, BaseException) for p in pairs): + if len(pairs) == 2 and not isinstance(pairs[1], (tuple, list)) \ + and all(issubclass(p, BaseException) for p in pairs): self.mapping = {pairs[0]: pairs[1]} else: self.mapping = dict(pairs)
piotrmaslanka/satella
5fa0a67e4d35431f2f54740ba8fcfbd7f6d8bc59
diff --git a/tests/test_coding/test_rethrow.py b/tests/test_coding/test_rethrow.py index ce17f722..80dd8ec6 100644 --- a/tests/test_coding/test_rethrow.py +++ b/tests/test_coding/test_rethrow.py @@ -39,4 +39,17 @@ class TestStuff(unittest.TestCase): def lol(): raise ValueError() - self.assertRaises(NameError, lol) \ No newline at end of file + self.assertRaises(NameError, lol) + + def test_issue_10(self): + + class WTFException1(Exception): pass + class WTFException2(Exception): pass + + @rethrow_as((NameError, WTFException1), + (TypeError, WTFException2)) + def provide(exc): + raise exc() + + self.assertRaises(WTFException1, lambda: provide(NameError)) + self.assertRaises(WTFException2, lambda: provide(TypeError))
Bad rethrow_as ```python @rethrow_as((UnicodeDecodeError, ConfigurationMalformed), (json.JSONDecodeError, ConfigurationMalformed)) @rethrow_as(ValueError, ConfigurationMalformed) @rethrow_as(binascii.Error, ConfigurationMalformed) @rethrow_as(TypeError, ConfigurationError) def provide(self): return json.loads(self.root, encoding=self.encoding) ``` breaks, because the first two pairs are treated the wrong way
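A sketch of the disambiguation the fix introduces: two bare exception classes mean a single mapping pair, while tuple arguments mean many pairs. The `_build_mapping` helper is hypothetical; only the `isinstance(pairs[1], (tuple, list))` guard echoes the patch:

```python
def _build_mapping(*pairs):
    """Two bare exception classes = one pair; tuples = many (from, to) pairs."""
    if (len(pairs) == 2
            and not isinstance(pairs[1], (tuple, list))
            and all(isinstance(p, type) and issubclass(p, BaseException)
                    for p in pairs)):
        return {pairs[0]: pairs[1]}      # rethrow_as(ValueError, NameError)
    return dict(pairs)                   # rethrow_as((ValueError, A), (TypeError, B))

class A(Exception): pass
class B(Exception): pass

assert _build_mapping(ValueError, NameError) == {ValueError: NameError}
assert _build_mapping((ValueError, A), (TypeError, B)) == {ValueError: A,
                                                           TypeError: B}
```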
0.0
5fa0a67e4d35431f2f54740ba8fcfbd7f6d8bc59
[ "tests/test_coding/test_rethrow.py::TestStuff::test_issue_10" ]
[ "tests/test_coding/test_rethrow.py::TestStuff::test_rethrow", "tests/test_coding/test_rethrow.py::TestStuff::test_rethrow_2", "tests/test_coding/test_rethrow.py::TestStuff::test_rethrow_3", "tests/test_coding/test_rethrow.py::TestStuff::test_silencer", "tests/test_coding/test_rethrow.py::TestStuff::test_silencer_2" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2018-02-16 16:59:11+00:00
bsd-3-clause
4,571
pjknkda__flake8-datetimez-3
diff --git a/README.md b/README.md index 9a5a3f6..4697b9f 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ A plugin for flake8 to ban the usage of unsafe naive datetime class. - **DTZ006** : The use of `datetime.datetime.fromtimestamp()` without `tz` argument is not allowed. -- **DTZ007** : The use of `datetime.datetime.strptime()` must be followed by `.replace(tzinfo=)`. +- **DTZ007** : The use of `datetime.datetime.strptime()` without %z must be followed by `.replace(tzinfo=)`. - **DTZ011** : The use of `datetime.date.today()` is not allowed. Use `datetime.datetime.now(tz=).date()` instead. diff --git a/flake8_datetimez.py b/flake8_datetimez.py index 2246bb9..33496ca 100644 --- a/flake8_datetimez.py +++ b/flake8_datetimez.py @@ -6,6 +6,11 @@ from functools import partial import pycodestyle +try: + STRING_NODE = ast.Str +except AttributeError: # ast.Str is deprecated in Python3.8 + STRING_NODE = ast.Constant + def _get_from_keywords(keywords, arg): for keyword in keywords: @@ -132,8 +137,11 @@ class DateTimeZVisitor(ast.NodeVisitor): is_case_1 = (tzinfo_keyword is not None and not (isinstance(tzinfo_keyword.value, ast.NameConstant) and tzinfo_keyword.value.value is None)) + # ex: `datetime.strptime(..., '...%z...')` + is_case_2 = ((1 < len(node.args)) and isinstance(node.args[1], STRING_NODE) + and ('%z' in node.args[1].s)) - if not is_case_1: + if not (is_case_1 or is_case_2): self.errors.append(DTZ007(node.lineno, node.col_offset)) # ex: `date.something()`` @@ -189,7 +197,7 @@ DTZ006 = Error( ) DTZ007 = Error( - message='DTZ007 The use of `datetime.datetime.strptime()` must be followed by `.replace(tzinfo=)`.' + message='DTZ007 The use of `datetime.datetime.strptime()` without %z must be followed by `.replace(tzinfo=)`.' ) DTZ011 = Error(
pjknkda/flake8-datetimez
98316ab383fd49ad438fe4f675f7d84b3f1150a8
diff --git a/test_datetimez.py b/test_datetimez.py index fd4559c..6c5fa43 100644 --- a/test_datetimez.py +++ b/test_datetimez.py @@ -163,6 +163,18 @@ class TestDateTimeZ(unittest.TestCase): ) self.assert_codes(errors, []) + def test_DTZ007_good_format(self): + errors = self.write_file_and_run_checker( + 'datetime.datetime.strptime(something, "%H:%M:%S%z")' + ) + self.assert_codes(errors, []) + + def test_DTZ007_bad_format(self): + errors = self.write_file_and_run_checker( + 'datetime.datetime.strptime(something, "%H:%M:%S%Z")' + ) + self.assert_codes(errors, ['DTZ007']) + def test_DTZ007_no_replace(self): errors = self.write_file_and_run_checker( 'datetime.datetime.strptime(something, something)'
datetime.datetime.strptime() triggers DTZ007 when using '%z' AFAICT having `%z` in the format string always produces timezone-aware datetime objects. Putting a `.replace(tzinfo=)` after one would likely introduce a bug by switching to the wrong timezone. Ideally DTZ007 should not be triggered when `%z` is present in the format string, and a new error could possibly be added for using `.replace(tzinfo=)` after a format that does include it. Note that `%Z` does *not* produce timezone-aware datetimes.
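A short demonstration of the behaviour the issue describes, using only the standard library:

```python
from datetime import datetime, timezone

aware = datetime.strptime('12:34:56+0000', '%H:%M:%S%z')
assert aware.tzinfo is not None                 # %z yields an aware datetime

naive = datetime.strptime('12:34:56 UTC', '%H:%M:%S %Z')
assert naive.tzinfo is None                     # %Z parses but stays naive

# Re-zoning an already-aware value is the bug the lint should not demand:
shifted = aware.replace(tzinfo=timezone.utc)
assert shifted == aware  # a no-op here only because the offsets happen to match
```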
0.0
98316ab383fd49ad438fe4f675f7d84b3f1150a8
[ "test_datetimez.py::TestDateTimeZ::test_DTZ007_good_format" ]
[ "test_datetimez.py::TestDateTimeZ::test_DTZ001_args_good", "test_datetimez.py::TestDateTimeZ::test_DTZ001_kwargs_good", "test_datetimez.py::TestDateTimeZ::test_DTZ001_no_args", "test_datetimez.py::TestDateTimeZ::test_DTZ001_no_kwargs", "test_datetimez.py::TestDateTimeZ::test_DTZ001_none_args", "test_datetimez.py::TestDateTimeZ::test_DTZ001_none_kwargs", "test_datetimez.py::TestDateTimeZ::test_DTZ002", "test_datetimez.py::TestDateTimeZ::test_DTZ003", "test_datetimez.py::TestDateTimeZ::test_DTZ004", "test_datetimez.py::TestDateTimeZ::test_DTZ005_args_good", "test_datetimez.py::TestDateTimeZ::test_DTZ005_keywords_good", "test_datetimez.py::TestDateTimeZ::test_DTZ005_no_args", "test_datetimez.py::TestDateTimeZ::test_DTZ005_none_args", "test_datetimez.py::TestDateTimeZ::test_DTZ005_none_keywords", "test_datetimez.py::TestDateTimeZ::test_DTZ005_wrong_keywords", "test_datetimez.py::TestDateTimeZ::test_DTZ006_args_good", "test_datetimez.py::TestDateTimeZ::test_DTZ006_keywords_good", "test_datetimez.py::TestDateTimeZ::test_DTZ006_no_args", "test_datetimez.py::TestDateTimeZ::test_DTZ006_none_args", "test_datetimez.py::TestDateTimeZ::test_DTZ006_none_keywords", "test_datetimez.py::TestDateTimeZ::test_DTZ006_wrong_keywords", "test_datetimez.py::TestDateTimeZ::test_DTZ007_bad_format", "test_datetimez.py::TestDateTimeZ::test_DTZ007_good", "test_datetimez.py::TestDateTimeZ::test_DTZ007_no_replace", "test_datetimez.py::TestDateTimeZ::test_DTZ007_none_replace", "test_datetimez.py::TestDateTimeZ::test_DTZ007_wrong_replace", "test_datetimez.py::TestDateTimeZ::test_DTZ011", "test_datetimez.py::TestDateTimeZ::test_DTZ012" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-06-02 21:55:05+00:00
mit
4,572
planetarypy__pvl-102
diff --git a/HISTORY.rst b/HISTORY.rst index d3324bd..de80e6e 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -34,6 +34,10 @@ Fixed +++++ * Deeply nested Aggregation Blocks (Object or Group) which had mis-matched Block Names should now properly result in LexerErrors instead of resulting in StopIteration Exceptions (Issue 100). +* The default "Omni" parsing strategy, now considers the ASCII NULL character ("\0") a "reserved character." + The practical effect is that the ASCII NULL can not be in parameter names or unquoted strings (but would still + be successfully parsed in quoted strings). This means that PVL-text that might have incorrectly used ASCII NULLs + as delimiters will once again be consumed by our omnivorous parser (Issue 98). 1.3.0 (2021-09-10) diff --git a/pvl/grammar.py b/pvl/grammar.py index 06eebf2..afa4e53 100755 --- a/pvl/grammar.py +++ b/pvl/grammar.py @@ -333,8 +333,10 @@ class OmniGrammar(PVLGrammar): def __init__(self): # Handle the fact that ISIS writes out unquoted plus signs. # See ISISGrammar for details. + # Also add the ASCII NULL ("\0") to the reserved_characters tuple. self.reserved_characters = tuple( - ISISGrammar.adjust_reserved_characters(self.reserved_characters) + ISISGrammar.adjust_reserved_characters(self.reserved_characters) + + ["\0", ] ) def char_allowed(self, char):
planetarypy/pvl
7614c0184e47b9d77ebf1297f12109f964e71e21
diff --git a/tests/test_pvl.py b/tests/test_pvl.py index b34d64e..5406372 100755 --- a/tests/test_pvl.py +++ b/tests/test_pvl.py @@ -842,6 +842,10 @@ def test_utf(): label = pvl.load(utf_file) assert label["LABEL_REVISION_NOTE"] == "V1.0" + nulllabel = pvl.loads("foo=bar END\0wont=parse") + assert nulllabel["foo"] == "bar" + assert len(nulllabel) == 1 + def test_latin1(): latin_file = os.path.join(BROKEN_DIR, "latin-1-degreesymb.pvl")
PVL Read ISIS Control network **Describe the bug** Unable to read an ISIS control network after upgrading to > 1.0. In the past, I have been able to pass a full ISIS control network to pvl and have it read the header off the network. The protobuf internals are outside the scope of the pvl library. **To Reproduce** Using the attached control network, run `pvl.load()`. The attached network is zipped so that GitHub will accept it as a filetype. [test.net.zip](https://github.com/planetarypy/pvl/files/7521160/test.net.zip) **Expected behavior** pvl should read the PVL header off the network, as it did before 1.0. **Error logs or terminal captures** ```bash Traceback (most recent call last): File "/Users/jlaura/miniconda3/envs/plio/lib/python3.8/site-packages/pvl/lexer.py", line 429, in lexer t = yield t ValueError: Expecting an Aggregation Block, an Assignment Statement, or an End Statement, but found "14:37:20" ``` **Your Environment (please complete the following information):** - OS: OS X Big Sur - Environment information: (e.g. Python version, etc.) - `pvl` Version 1.3.0 **Additional context** I am not 100% sure that this is a PVL issue or an issue with the mocked control network. The Aggregation Block and lexer errors are opaque to me though, so I am looking for some additional guidance.
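Assuming a pvl build that includes the fix above, the new test case boils down to this: the ASCII NUL that separates the PVL header from the binary protobuf payload now acts as a reserved character and terminates parsing cleanly:

```python
import pvl  # assumes a version containing the reserved-character fix above

# A control network is a PVL header followed by binary data; the first NUL
# after END should end parsing rather than be lexed as part of a token.
label = pvl.loads("foo=bar END\0wont=parse")
assert label["foo"] == "bar"
assert len(label) == 1  # nothing past the NUL is parsed
```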
0.0
7614c0184e47b9d77ebf1297f12109f964e71e21
[ "tests/test_pvl.py::test_utf" ]
[ "tests/test_pvl.py::test_assignment", "tests/test_pvl.py::test_spacing", "tests/test_pvl.py::test_linewrap", "tests/test_pvl.py::test_special", "tests/test_pvl.py::test_integers", "tests/test_pvl.py::test_floats", "tests/test_pvl.py::test_exponents", "tests/test_pvl.py::test_objects", "tests/test_pvl.py::test_groups", "tests/test_pvl.py::test_alt_group_style", "tests/test_pvl.py::test_binary", "tests/test_pvl.py::test_octal", "tests/test_pvl.py::test_hex", "tests/test_pvl.py::test_quotes", "tests/test_pvl.py::test_comments", "tests/test_pvl.py::test_dates", "tests/test_pvl.py::test_set", "tests/test_pvl.py::test_sequence", "tests/test_pvl.py::test_sequence_backslashes", "tests/test_pvl.py::test_units", "tests/test_pvl.py::test_delimiters", "tests/test_pvl.py::test_isis_output", "tests/test_pvl.py::test_latin1", "tests/test_pvl.py::test_cube_label", "tests/test_pvl.py::test_cube_label_r", "tests/test_pvl.py::test_pds3_sample_image", "tests/test_pvl.py::test_load_all_sample_labels", "tests/test_pvl.py::test_unicode", "tests/test_pvl.py::test_bytes", "tests/test_pvl.py::test_end_comment", "tests/test_pvl.py::test_parse_error", "tests/test_pvl.py::test_broken_labels[broken1.lbl-expected0-expected_errors0]", "tests/test_pvl.py::test_broken_labels[broken2.lbl-expected1-expected_errors1]", "tests/test_pvl.py::test_broken_labels[broken3.lbl-expected2-expected_errors2]", "tests/test_pvl.py::test_broken_labels[broken4.lbl-expected3-expected_errors3]", "tests/test_pvl.py::test_broken_labels[broken5.lbl-expected4-expected_errors4]", "tests/test_pvl.py::test_broken_labels[broken6.lbl-expected5-expected_errors5]", "tests/test_pvl.py::test_broken_labels[broken7.lbl-expected6-expected_errors6]", "tests/test_pvl.py::test_broken_labels[broken8.lbl-expected7-expected_errors7]", "tests/test_pvl.py::test_broken_labels[broken9.lbl-expected8-expected_errors8]", "tests/test_pvl.py::test_broken_labels[broken10.lbl-expected9-expected_errors9]", "tests/test_pvl.py::test_broken_labels[broken11.lbl-expected10-expected_errors10]", "tests/test_pvl.py::test_broken_labels[broken12.lbl-expected11-expected_errors11]", "tests/test_pvl.py::test_broken_labels[broken13.lbl-expected12-expected_errors12]", "tests/test_pvl.py::test_broken_labels[broken14.lbl-expected13-expected_errors13]", "tests/test_pvl.py::test_broken_labels[broken15.lbl-expected14-expected_errors14]", "tests/test_pvl.py::test_broken_labels[broken16.lbl-expected15-expected_errors15]", "tests/test_pvl.py::test_broken_labels_LexerError[broken1.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken3.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken4.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken5.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken6.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken7.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken8.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken9.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken10.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken11.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken12.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken13.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken14.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken15.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken16.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[latin-1-degreesymb.pvl]", 
"tests/test_pvl.py::test_broken_labels_ParseError", "tests/test_pvl.py::test_EmptyValueAtLine", "tests/test_pvl.py::test_load_all_bad_sample_labels", "tests/test_pvl.py::test_dump_stream", "tests/test_pvl.py::test_dump_to_file", "tests/test_pvl.py::test_default_encoder", "tests/test_pvl.py::test_pds_encoder", "tests/test_pvl.py::test_special_values", "tests/test_pvl.py::test_special_strings", "tests/test_pvl.py::test_unkown_value", "tests/test_pvl.py::test_quoated_strings", "tests/test_pvl.py::test_dump_to_file_insert_before", "tests/test_pvl.py::test_dump_to_file_insert_after" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-01-20 01:02:30+00:00
bsd-3-clause
4,573
planetarypy__pvl-25
diff --git a/pvl/_collections.py b/pvl/_collections.py index afeab25..2abaf3a 100644 --- a/pvl/_collections.py +++ b/pvl/_collections.py @@ -34,6 +34,13 @@ class KeysView(MappingView): for key, _ in self._mapping: yield key + def __getitem__(self, index): + return self._mapping[index][0] + + def __repr__(self): + keys = [key for key, _ in self._mapping] + return '%s(%r)' % (type(self).__name__, keys) + class ItemsView(MappingView): def __contains__(self, item): @@ -44,6 +51,9 @@ class ItemsView(MappingView): for item in self._mapping: yield item + def __getitem__(self, index): + return self._mapping[index] + class ValuesView(MappingView): def __contains__(self, value): @@ -56,6 +66,13 @@ class ValuesView(MappingView): for _, value in self._mapping: yield value + def __getitem__(self, index): + return self._mapping[index][1] + + def __repr__(self): + values = [value for _, value in self._mapping] + return '%s(%r)' % (type(self).__name__, values) + class OrderedMultiDict(dict, MutableMapping): """A ``dict`` like container.
planetarypy/pvl
a483fee7b9b658bb0c22586dc1ab87753439b998
diff --git a/tests/test_collections.py b/tests/test_collections.py index a77f9dc..50df73d 100644 --- a/tests/test_collections.py +++ b/tests/test_collections.py @@ -368,6 +368,39 @@ def test_py2_items(): assert module.values() == [1, 2, 3] [email protected](six.PY2, reason='requires python3') +def test_py3_items(): + module = pvl.PVLModule() + + assert isinstance(module.items(), pvl._collections.ItemsView) + with pytest.raises(IndexError): + module.items()[0] + + assert isinstance(module.keys(), pvl._collections.KeysView) + with pytest.raises(IndexError): + module.keys()[0] + + assert isinstance(module.values(), pvl._collections.ValuesView) + with pytest.raises(IndexError): + module.values()[0] + + module = pvl.PVLModule([ + ('a', 1), + ('b', 2), + ('a', 3), + ]) + + assert isinstance(module.items(), pvl._collections.ItemsView) + assert module.items()[0] == ('a', 1) + + assert isinstance(module.keys(), pvl._collections.KeysView) + assert module.keys()[0] == 'a' + + assert isinstance(module.values(), pvl._collections.ValuesView) + assert module.values()[0] == 1 + + + if six.PY3: def iteritems(module): return module.items()
items is not subscriptable in python3 As pointed out in #23, in python3 ``items()`` returns an [ItemsView](https://github.com/planetarypy/pvl/blob/master/pvl/_collections.py#L38) object which is not subscriptable ``` In [1]: import pvl In [2]: label = pvl.loads(""" ...: foo = bar ...: monty = python ...: """) In [3]: items = label.items() In [4]: items[0] --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-95f461411437> in <module>() ----> 1 items[0] TypeError: 'ItemsView' object does not support indexing ``` But we should get something similar to: ``` In [4]: items[0] Out [4]: ('foo', 'bar') ```
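A minimal sketch of the indexing support the patch adds, built on `collections.abc.MappingView` (whose constructor stores its argument as `self._mapping`). Here the "mapping" is simply a list of key/value pairs, as in pvl's `OrderedMultiDict`:

```python
from collections.abc import MappingView

class IndexableItemsView(MappingView):
    """Minimal sketch: delegate integer indexing to the underlying pair list."""
    def __getitem__(self, index):
        return self._mapping[index]  # self._mapping holds (key, value) tuples

pairs = [('foo', 'bar'), ('monty', 'python')]
view = IndexableItemsView(pairs)
assert view[0] == ('foo', 'bar')
assert len(view) == 2
```

The keys and values views in the patch follow the same pattern, returning `self._mapping[index][0]` and `self._mapping[index][1]` respectively.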
0.0
a483fee7b9b658bb0c22586dc1ab87753439b998
[ "tests/test_collections.py::test_py3_items" ]
[ "tests/test_collections.py::test_empty", "tests/test_collections.py::test_list_creation", "tests/test_collections.py::test_dict_creation", "tests/test_collections.py::test_keyword_creation", "tests/test_collections.py::test_key_access", "tests/test_collections.py::test_index_access", "tests/test_collections.py::test_slice_access", "tests/test_collections.py::test_set", "tests/test_collections.py::test_delete", "tests/test_collections.py::test_clear", "tests/test_collections.py::test_discard", "tests/test_collections.py::test_pop", "tests/test_collections.py::test_popitem", "tests/test_collections.py::test_update", "tests/test_collections.py::test_append", "tests/test_collections.py::test_len", "tests/test_collections.py::test_repr", "tests/test_collections.py::test_iterators", "tests/test_collections.py::test_equlity", "tests/test_collections.py::test_copy" ]
{ "failed_lite_validators": [ "has_issue_reference" ], "has_test_patch": true, "is_lite": false }
2017-03-11 21:45:30+00:00
bsd-3-clause
4,574
planetarypy__pvl-64
diff --git a/HISTORY.rst b/HISTORY.rst index d153e89..6a3f189 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -3,6 +3,13 @@ History ------- +1.0.0-alpha.6 (2020-07-27) +~~~~~~~~~~~~~~~~~~~~~~~~~~ +* Enforced that all datetime.time and datetime.datetime objects + returned should be timezone "aware." This breaks 0.x functionality + where some were and some weren't. Addresses #57. + + 1.0.0-alpha.5 (2020-05-30) ~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ISIS creates PVL text with unquoted plus signs ("+"), needed to adjust diff --git a/pvl/__init__.py b/pvl/__init__.py index c5662af..f1c6184 100755 --- a/pvl/__init__.py +++ b/pvl/__init__.py @@ -23,7 +23,7 @@ from ._collections import ( __author__ = 'The pvl Developers' __email__ = '[email protected]' -__version__ = '1.0.0-alpha.5' +__version__ = '1.0.0-alpha.6' __all__ = [ 'load', 'loads', diff --git a/pvl/decoder.py b/pvl/decoder.py index 80a3610..d172312 100644 --- a/pvl/decoder.py +++ b/pvl/decoder.py @@ -193,8 +193,16 @@ class PVLDecoder(object): seconds. However, the Python ``datetime`` classes don't support second values for more than 59 seconds. + Since the PVL Blue Book says that all PVl Date/Time Values + are represented in Universal Coordinated Time, then all + datetime objects that are returned datetime Python objects + should be timezone "aware." A datetime.date object is always + "naive" but any datetime.time or datetime.datetime objects + returned from this function will be "aware." + If a time with 60 seconds is encountered, it will not be - returned as a datetime object, but simply as a string. + returned as a datetime object (since that is not representable + via Python datetime objects), but simply as a string. The user can then then try and use the ``time`` module to parse this string into a ``time.struct_time``. We @@ -212,29 +220,46 @@ class PVLDecoder(object): numerical types, and do something useful with them. """ try: + # datetime.date objects will always be naive, so just return: return for_try_except(ValueError, datetime.strptime, repeat(value), self.grammar.date_formats).date() except ValueError: + # datetime.time and datetime.datetime might be either: + d = None try: - return for_try_except(ValueError, datetime.strptime, - repeat(value), - self.grammar.time_formats).time() + d = for_try_except(ValueError, datetime.strptime, + repeat(value), + self.grammar.time_formats).time() except ValueError: try: - return for_try_except(ValueError, datetime.strptime, - repeat(value), - self.grammar.datetime_formats) + d = for_try_except(ValueError, datetime.strptime, + repeat(value), + self.grammar.datetime_formats) except ValueError: pass + if d is not None: + if d.utcoffset() is None: + return d.replace(tzinfo=timezone.utc) + else: + return d # if we can regex a 60-second time, return str + if self.is_leap_seconds(value): + return str(value) + else: + raise ValueError + + def is_leap_seconds(self, value: str) -> bool: + """Returns True if *value* is a time that matches the + grammar's definition of a leap seconds time (a time string with + a value of 60 for the seconds value). False otherwise.""" for r in (self.grammar.leap_second_Ymd_re, self.grammar.leap_second_Yj_re): if r is not None and r.fullmatch(value) is not None: - return str(value) - - raise ValueError + return True + else: + return False def decode_quantity(self, value, unit): """Returns a Python object that represents a value with @@ -280,7 +305,7 @@ class ODLDecoder(PVLDecoder): # Otherwise ... 
match = re.fullmatch(r'(?P<dt>.+?)' # the part before the sign r'(?P<sign>[+-])' # required sign - r'(?P<hour>0?[1-9]|1[0-2])' # 1 to 12 + r'(?P<hour>0?[0-9]|1[0-2])' # 0 to 12 fr'(?:{self.grammar._M_frag})?', # Minutes value) if match is not None: diff --git a/pvl/encoder.py b/pvl/encoder.py index bd5378a..f1ba1d4 100644 --- a/pvl/encoder.py +++ b/pvl/encoder.py @@ -669,7 +669,7 @@ class ODLEncoder(PVLEncoder): t = super().encode_time(value) - if value.tzinfo is None: + if value.tzinfo is None or value.tzinfo == 0: return t + 'Z' else: td_str = str(value.utcoffset()) diff --git a/setup.py b/setup.py index f5fa90d..3a47230 100755 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ history = open('HISTORY.rst').read().replace('.. :changelog:', '') setup( name='pvl', - version='1.0.0-alpha.5', + version='1.0.0-alpha.6', description='Python implementation of PVL (Parameter Value Language)', long_description=readme + '\n\n' + history, author='The PlanetaryPy Developers', diff --git a/tox.ini b/tox.ini index 70acb56..c8a1ce3 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ envlist = py36, py37, py38, flake8 setenv = PYTHONPATH = {toxinidir} commands = - py.test tests + pytest deps = -r{toxinidir}/requirements.txt
planetarypy/pvl
a953ba2a4b5eb79d7e5ce8dcd30a645f13930137
diff --git a/tests/test_decoder.py b/tests/test_decoder.py index c08da7f..c9ac534 100644 --- a/tests/test_decoder.py +++ b/tests/test_decoder.py @@ -90,15 +90,19 @@ class TestDecoder(unittest.TestCase): self.assertEqual(p[1], self.d.decode_non_decimal(p[0])) def test_decode_datetime(self): + utc = datetime.timezone.utc for p in(('2001-01-01', datetime.date(2001, 1, 1)), ('2001-027', datetime.date(2001, 1, 27)), ('2001-027Z', datetime.date(2001, 1, 27)), - ('23:45', datetime.time(23, 45)), - ('01:42:57', datetime.time(1, 42, 57)), - ('12:34:56.789', datetime.time(12, 34, 56, 789000)), - ('2001-027T23:45', datetime.datetime(2001, 1, 27, 23, 45)), - ('2001-01-01T01:34Z', datetime.datetime(2001, 1, 1, 1, 34)), - ('01:42:57Z', datetime.time(1, 42, 57)), + ('23:45', datetime.time(23, 45, tzinfo=utc)), + ('01:42:57', datetime.time(1, 42, 57, tzinfo=utc)), + ('12:34:56.789', datetime.time(12, 34, 56, 789000, + tzinfo=utc)), + ('2001-027T23:45', datetime.datetime(2001, 1, 27, 23, 45, + tzinfo=utc)), + ('2001-01-01T01:34Z', datetime.datetime(2001, 1, 1, 1, 34, + tzinfo=utc)), + ('01:42:57Z', datetime.time(1, 42, 57, tzinfo=utc)), ('2001-12-31T01:59:60.123Z', '2001-12-31T01:59:60.123Z'), ('01:00:60', '01:00:60')): with self.subTest(pair=p): @@ -149,6 +153,7 @@ class TestODLDecoder(unittest.TestCase): def test_decode_datetime(self): try: + utc = datetime.timezone.utc from dateutil import tz tz_plus_7 = tz.tzoffset('+7', datetime.timedelta(hours=7)) @@ -156,19 +161,21 @@ class TestODLDecoder(unittest.TestCase): ('1990-158', datetime.date(1990, 6, 7)), ('2001-001', datetime.date(2001, 1, 1)), ('2001-01-01', datetime.date(2001, 1, 1)), - ('12:00', datetime.time(12)), - ('12:00:45', datetime.time(12, 0, 45)), - ('12:00:45.4571', datetime.time(12, 0, 45, 457100)), - ('15:24:12Z', datetime.time(15, 24, 12)), + ('12:00', datetime.time(12, tzinfo=utc)), + ('12:00:45', datetime.time(12, 0, 45, tzinfo=utc)), + ('12:00:45.4571', datetime.time(12, 0, 45, 457100, + tzinfo=utc)), + ('15:24:12Z', datetime.time(15, 24, 12, tzinfo=utc)), ('01:12:22+07', datetime.time(1, 12, 22, tzinfo=tz_plus_7)), ('01:12:22+7', datetime.time(1, 12, 22, tzinfo=tz_plus_7)), ('01:10:39.4575+07', datetime.time(1, 10, 39, 457500, tzinfo=tz_plus_7)), - ('1990-07-04T12:00', datetime.datetime(1990, 7, 4, 12)), + ('1990-07-04T12:00', datetime.datetime(1990, 7, 4, 12, + tzinfo=utc)), ('1990-158T15:24:12Z', - datetime.datetime(1990, 6, 7, 15, 24, 12)), + datetime.datetime(1990, 6, 7, 15, 24, 12, tzinfo=utc)), ('2001-001T01:10:39+7', datetime.datetime(2001, 1, 1, 1, 10, 39, tzinfo=tz_plus_7)), diff --git a/tests/test_pvl.py b/tests/test_pvl.py index 33e2991..3b94af1 100755 --- a/tests/test_pvl.py +++ b/tests/test_pvl.py @@ -526,6 +526,7 @@ def test_dates(): label = pvl.loads(some_pvl) tz_plus_7 = datetime.timezone(datetime.timedelta(hours=7)) + utc = datetime.timezone.utc assert isinstance(label['date1'], datetime.date) assert label['date1'] == datetime.date(1990, 7, 4) @@ -540,25 +541,26 @@ def test_dates(): assert label['date4'] == datetime.date(2001, 1, 1) assert isinstance(label['time1'], datetime.time) - assert label['time1'] == datetime.time(12) + assert label['time1'] == datetime.time(12, tzinfo=utc) assert isinstance(label['time_s'], datetime.time) - assert label['time_s'] == datetime.time(12, 0, 45) + assert label['time_s'] == datetime.time(12, 0, 45, tzinfo=utc) assert isinstance(label['time_s_float'], datetime.time) - assert label['time_s_float'] == datetime.time(12, 0, 45, 457100) + assert label['time_s_float'] == datetime.time(12, 
0, 45, 457100, tzinfo=utc) assert isinstance(label['time_tz1'], datetime.time) - assert label['time_tz1'] == datetime.time(15, 24, 12) + assert label['time_tz1'] == datetime.time(15, 24, 12, tzinfo=utc) assert isinstance(label['time_tz2'], datetime.time) assert label['time_tz2'] == datetime.time(1, 12, 22, tzinfo=tz_plus_7) assert isinstance(label['datetime1'], datetime.datetime) - assert label['datetime1'] == datetime.datetime(1990, 7, 4, 12) + assert label['datetime1'] == datetime.datetime(1990, 7, 4, 12, tzinfo=utc) assert isinstance(label['datetime2'], datetime.datetime) - assert label['datetime2'] == datetime.datetime(1990, 6, 7, 15, 24, 12) + assert label['datetime2'] == datetime.datetime(1990, 6, 7, 15, 24, 12, + tzinfo=utc) assert isinstance(label['time_tz3'], datetime.time) assert label['time_tz3'] == datetime.time(1, 12, 22, tzinfo=tz_plus_7)
Returned Date/Time objects must be "aware"

The current (1.0.0-alpha2) implementation can return datetime objects that are either "naive" or "aware" (to use the terminology from the Python datetime library). However, the PVL Specification clearly indicates in section 2.3.2.1.3 (Date/Time Value) that:

> The Date/Time Value is a strict subset of the CCSDS ASCII Time Code recommendation (Reference [3]), in which all time is represented in Universal Coordinated Time, (i.e. Greenwich Mean Time).

This means that any time the `pvl` library returns a datetime object, it should be an "aware" datetime object.
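A short sketch of the post-fix behavior, based on the expectations encoded in the test patch above; the PVL-text, the `start_time` key, and treating `pvl.loads` as the entry point are illustrative assumptions, while the expected time value is taken from the new tests:

```python
import datetime

import pvl

# Per the spec quoted above, a time with no timezone specifier is UTC,
# so the returned object should now be "aware" rather than "naive".
label = pvl.loads("start_time = 01:42:57")
t = label["start_time"]

assert t == datetime.time(1, 42, 57, tzinfo=datetime.timezone.utc)
assert t.tzinfo is not None  # an "aware" time object
```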
0.0
a953ba2a4b5eb79d7e5ce8dcd30a645f13930137
[ "tests/test_decoder.py::TestDecoder::test_decode_datetime", "tests/test_pvl.py::test_dates" ]
[ "tests/test_decoder.py::TestForTryExcept::test_for_try_except", "tests/test_decoder.py::TestDecoder::test_decode_decimal", "tests/test_decoder.py::TestDecoder::test_decode_non_decimal", "tests/test_decoder.py::TestDecoder::test_decode_quantity", "tests/test_decoder.py::TestDecoder::test_decode_quoted_string", "tests/test_decoder.py::TestDecoder::test_decode_simple_value", "tests/test_decoder.py::TestDecoder::test_decode_unquoted_string", "tests/test_decoder.py::TestODLDecoder::test_decode_datetime", "tests/test_pvl.py::test_assignment", "tests/test_pvl.py::test_spacing", "tests/test_pvl.py::test_linewrap", "tests/test_pvl.py::test_special", "tests/test_pvl.py::test_integers", "tests/test_pvl.py::test_floats", "tests/test_pvl.py::test_exponents", "tests/test_pvl.py::test_objects", "tests/test_pvl.py::test_groups", "tests/test_pvl.py::test_alt_group_style", "tests/test_pvl.py::test_binary", "tests/test_pvl.py::test_octal", "tests/test_pvl.py::test_hex", "tests/test_pvl.py::test_quotes", "tests/test_pvl.py::test_comments", "tests/test_pvl.py::test_set", "tests/test_pvl.py::test_sequence", "tests/test_pvl.py::test_sequence_backslashes", "tests/test_pvl.py::test_units", "tests/test_pvl.py::test_delimiters", "tests/test_pvl.py::test_isis_output", "tests/test_pvl.py::test_cube_label", "tests/test_pvl.py::test_cube_label_r", "tests/test_pvl.py::test_pds3_sample_image", "tests/test_pvl.py::test_load_all_sample_labels", "tests/test_pvl.py::test_unicode", "tests/test_pvl.py::test_bytes", "tests/test_pvl.py::test_end_comment", "tests/test_pvl.py::test_parse_error", "tests/test_pvl.py::test_broken_labels[broken1.lbl-expected0-expected_errors0]", "tests/test_pvl.py::test_broken_labels[broken2.lbl-expected1-expected_errors1]", "tests/test_pvl.py::test_broken_labels[broken3.lbl-expected2-expected_errors2]", "tests/test_pvl.py::test_broken_labels[broken4.lbl-expected3-expected_errors3]", "tests/test_pvl.py::test_broken_labels[broken5.lbl-expected4-expected_errors4]", "tests/test_pvl.py::test_broken_labels[broken6.lbl-expected5-expected_errors5]", "tests/test_pvl.py::test_broken_labels[broken7.lbl-expected6-expected_errors6]", "tests/test_pvl.py::test_broken_labels[broken8.lbl-expected7-expected_errors7]", "tests/test_pvl.py::test_broken_labels[broken9.lbl-expected8-expected_errors8]", "tests/test_pvl.py::test_broken_labels[broken10.lbl-expected9-expected_errors9]", "tests/test_pvl.py::test_broken_labels[broken11.lbl-expected10-expected_errors10]", "tests/test_pvl.py::test_broken_labels[broken12.lbl-expected11-expected_errors11]", "tests/test_pvl.py::test_broken_labels[broken13.lbl-expected12-expected_errors12]", "tests/test_pvl.py::test_broken_labels[broken14.lbl-expected13-expected_errors13]", "tests/test_pvl.py::test_broken_labels[broken15.lbl-expected14-expected_errors14]", "tests/test_pvl.py::test_broken_labels[broken16.lbl-expected15-expected_errors15]", "tests/test_pvl.py::test_broken_labels_LexerError[broken1.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken3.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken4.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken5.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken6.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken7.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken8.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken9.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken10.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken11.lbl]", 
"tests/test_pvl.py::test_broken_labels_LexerError[broken12.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken13.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken14.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken15.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken16.lbl]", "tests/test_pvl.py::test_broken_labels_ParseError", "tests/test_pvl.py::test_EmptyValueAtLine", "tests/test_pvl.py::test_load_all_bad_sample_labels", "tests/test_pvl.py::test_dump_stream", "tests/test_pvl.py::test_dump_to_file", "tests/test_pvl.py::test_default_encoder", "tests/test_pvl.py::test_pds_encoder", "tests/test_pvl.py::test_special_values", "tests/test_pvl.py::test_special_strings", "tests/test_pvl.py::test_unkown_value", "tests/test_pvl.py::test_quoated_strings", "tests/test_pvl.py::test_dump_to_file_insert_before", "tests/test_pvl.py::test_dump_to_file_insert_after" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-07-28 02:06:27+00:00
bsd-3-clause
4,575
planetarypy__pvl-74
diff --git a/HISTORY.rst b/HISTORY.rst index ad2bec0..47b0b45 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -30,6 +30,22 @@ and the release date, in year-month-day format (see examples below). Unreleased ---------- +1.1.0 (2020-12-04) +------------------ + +Added ++++++ +* Modified `pvl_validate` to more robustly deal with errors, and also provide + more error-reporting via `-v` and `-vv`. +* Modified ISISGrammar so that it can parse comments that begin with an octothorpe (#). + +Fixed ++++++ +* Altered documentation in grammar.py that was incorrectly indicating that + there were parameters that could be passed on object initiation that would + alter how those objects behaved. + + 1.0.1 (2020-09-21) ------------------ diff --git a/pvl/__init__.py b/pvl/__init__.py index 917aa4f..0e8f932 100755 --- a/pvl/__init__.py +++ b/pvl/__init__.py @@ -24,7 +24,7 @@ from .collections import ( __author__ = "The pvl Developers" __email__ = "[email protected]" -__version__ = "1.0.1" +__version__ = "1.1.0" __all__ = [ "load", "loads", diff --git a/pvl/grammar.py b/pvl/grammar.py old mode 100644 new mode 100755 index 2fa3428..02cfa37 --- a/pvl/grammar.py +++ b/pvl/grammar.py @@ -1,5 +1,21 @@ # -*- coding: utf-8 -*- -"""Describes the language aspects of PVL dialects.""" +"""Describes the language aspects of PVL dialects. + +These grammar objects are not particularly meant to be easily +user-modifiable during running of an external program, which is why +they have no arguments at initiation time, nor are there any methods +or functions to modify them. This is because these grammar objects +are used both for reading and writing PVL-text. As such, objects +like PVLGrammar and ODLGrammar shouldn't be altered, because if +they are, then the PVL-text written out with them wouldn't conform +to the spec. + +Certainly, these objects do have attributes that can be altered, +but unless you've carefully read the code, it isn't recommended. + +Maybe someday we'll add a more user-friendly interface to allow that, +but in the meantime, just leave an Issue on the GitHub repo. +""" # Copyright 2019-2020, ``pvl`` library authors. # @@ -15,22 +31,19 @@ class PVLGrammar: """Describes a PVL grammar for use by the lexer and parser. The reference for this grammar is the CCSDS-641.0-B-2 'Blue Book'. - - :param whitespace: Tuple of characters to be recognized as PVL - White Space (used to separate syntactic elements and promote - readability, but the amount or presence of White Space may - not be used to provide different meanings). - - :param reserved_characters: Tuple of characters that may not - occur in Parameter Names, Unquoted Strings, or Block Names. - - :param comments: Tuple of two-tuples with each two-tuple containing - a pair of character sequences that enclose a comment. """ spacing_characters = (" ", "\t") format_effectors = ("\n", "\r", "\v", "\f") + + # Tuple of characters to be recognized as PVL White Space + # (used to separate syntactic elements and promote readability, + # but the amount or presence of White Space may not be used to + # provide different meanings). whitespace = spacing_characters + format_effectors + + # Tuple of characters that may not occur in Parameter Names, + # Unquoted Strings, nor Block Names. reserved_characters = ( "&", "<", @@ -63,6 +76,8 @@ class PVLGrammar: delimiters = (";",) + # Tuple of two-tuples with each two-tuple containing a pair of character + # sequences that enclose a comment. 
comments = (("/*", "*/"),) # A note on keywords: they should always be compared with @@ -240,6 +255,10 @@ class ISISGrammar(PVLGrammar): object_pref_keywords = ("Object", "End_Object") object_keywords = {"OBJECT": "END_OBJECT"} + # A single-line comment that starts with the octothorpe (#) is not part + # of PVL or ODL, but it is used when ISIS writes out comments. + comments = (("/*", "*/"), ("#", "\n")) + def __init__(self): # ISIS allows for + characters in Unquoted String values. self.reserved_characters = tuple( diff --git a/pvl/pvl_validate.py b/pvl/pvl_validate.py index f646e39..fa4ca32 100644 --- a/pvl/pvl_validate.py +++ b/pvl/pvl_validate.py @@ -85,7 +85,8 @@ def arg_parser(): "--verbose", action="count", default=0, - help="Will report the errors that are encountered.", + help="Will report the errors that are encountered. A second v will " + "include tracebacks for non-pvl exceptions. ", ) p.add_argument("--version", action="version", version=pvl.__version__) p.add_argument( @@ -146,8 +147,17 @@ def pvl_flavor( except (LexerError, ParseError) as err: logging.error(f"{dialect} load error {filename} {err}") loads = False + except: # noqa E722 + if verbose <= 1: + logging.error( + f"{dialect} load error {filename}, try -vv for more info." + ) + else: + logging.exception(f"{dialect} load error {filename}") + logging.error(f"End {dialect} load error {filename}") + loads = False - return (loads, encodes) + return loads, encodes def report(reports: list, flavors: list) -> str: diff --git a/setup.cfg b/setup.cfg index 1a07447..ffeb34f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.0.1 +current_version = 1.1.0 commit = False tag = False parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\-(?P<prerelease>[a-z]+)\.((?P<serial>\d+)))? @@ -22,3 +22,4 @@ values = [bumpversion:file:setup.py] [bumpversion:file:pvl/__init__.py] + diff --git a/setup.py b/setup.py index 7c39881..39c5fad 100755 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ history = open('HISTORY.rst').read().replace('.. :changelog:', '') setup( name='pvl', - version='1.0.1', + version='1.1.0', description='Python implementation of PVL (Parameter Value Language)', long_description=readme + '\n\n' + history, author='The PlanetaryPy Developers',
planetarypy/pvl
019d7c0daea2368ccecabb5e89d54cb418f1c37e
diff --git a/tests/data/isis_octothorpe.txt b/tests/data/isis_octothorpe.txt new file mode 100644 index 0000000..02a470e --- /dev/null +++ b/tests/data/isis_octothorpe.txt @@ -0,0 +1,23 @@ +Group = Radiometry + # Bitweight Correction Parameters + BitweightCorrectionPerformed = "No: Table converted" + BitweightFile = "Not applicable: No bitweight correction" + + # Bias Subtraction Parameters + BiasSubtractionPerformed = Yes + BiasSubtractionMethod = "Overclock fit" + NumberOfOverclocks = 2 + + # Dark Current Subtraction Parameters + DarkSubtractionPerformed = Yes + DarkParameterFile = /usgs/cpkgs/isis3/data/cassini/calibration- + /darkcurrent/nac_median_dark_parameters042- + 28.full.cub + BiasDistortionTable = /usgs/cpkgs/isis3/data/cassini/calibration- + /darkcurrent/nac_bias_distortion.tab + + # Linearity Correction Parameters + LinearityCorrectionPerformed = Yes + LinearityCorrectionTable = /usgs/cpkgs/isis3/data/cassini/calibration- + /linearize/NAC2.lut +End_Group diff --git a/tests/test_pvl.py b/tests/test_pvl.py index 86bea35..ec496a1 100755 --- a/tests/test_pvl.py +++ b/tests/test_pvl.py @@ -811,11 +811,16 @@ def test_delimiters(): def test_isis_output(): - label = pvl.load(os.path.join(DATA_DIR, "isis_output.txt")) - assert label["Results"]["TotalPixels"] == 2048000 + # Should test that both the ISISGrammar and OmniGrammar can deal with these: + for g in (pvl.grammar.OmniGrammar(), pvl.grammar.ISISGrammar()): + label = pvl.load(os.path.join(DATA_DIR, "isis_output.txt"), grammar=g) + assert label["Results"]["TotalPixels"] == 2048000 - naif = pvl.load(os.path.join(DATA_DIR, "isis_naif.txt")) - assert naif["NaifKeywords"]["INS-143400_LIGHTTIME_CORRECTION"] == "LT+S" + naif = pvl.load(os.path.join(DATA_DIR, "isis_naif.txt"), grammar=g) + assert naif["NaifKeywords"]["INS-143400_LIGHTTIME_CORRECTION"] == "LT+S" + + aleish = pvl.load(os.path.join(DATA_DIR, "isis_octothorpe.txt"), grammar=g) + assert aleish["Radiometry"]["NumberOfOverclocks"] == 2 def test_cube_label():
pvl.grammar objects have incorrect documentation, and ISISGrammar needs to handle #-comments

**Describe the bug**
The documentation for the various grammar objects in grammar.py incorrectly conveys that there are some parameters that could be specified on object instantiation. Additionally, the ISISGrammar needs to be able to handle comments that begin with an octothorpe (#), and it currently does not.

**Expected behavior**
* The documentation for the grammar objects should properly convey what is and isn't possible via their interface.
* The ISISGrammar object should be able to handle PVL-text with octothorpe comments.

**Additional context**
Please see #72 for a more extended discussion related to these topics.
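A minimal sketch of the fixed behavior, mirroring the new `isis_octothorpe.txt` fixture and the updated `test_isis_output` test; the inline PVL-text here is an abbreviated, hypothetical stand-in for that fixture:

```python
import pvl

# ISIS-style PVL-text with single-line comments that begin with an
# octothorpe (#), which ISISGrammar previously could not lex.
text = """Group = Radiometry
  # Bias Subtraction Parameters
  BiasSubtractionPerformed = Yes
  NumberOfOverclocks = 2
End_Group"""

# Per the updated test, both the ISIS-specific grammar and the
# permissive Omni grammar should cope with these comments.
for g in (pvl.grammar.ISISGrammar(), pvl.grammar.OmniGrammar()):
    label = pvl.loads(text, grammar=g)
    assert label["Radiometry"]["NumberOfOverclocks"] == 2
```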
0.0
019d7c0daea2368ccecabb5e89d54cb418f1c37e
[ "tests/test_pvl.py::test_isis_output" ]
[ "tests/test_pvl.py::test_assignment", "tests/test_pvl.py::test_spacing", "tests/test_pvl.py::test_linewrap", "tests/test_pvl.py::test_special", "tests/test_pvl.py::test_integers", "tests/test_pvl.py::test_floats", "tests/test_pvl.py::test_exponents", "tests/test_pvl.py::test_objects", "tests/test_pvl.py::test_groups", "tests/test_pvl.py::test_alt_group_style", "tests/test_pvl.py::test_binary", "tests/test_pvl.py::test_octal", "tests/test_pvl.py::test_hex", "tests/test_pvl.py::test_quotes", "tests/test_pvl.py::test_comments", "tests/test_pvl.py::test_dates", "tests/test_pvl.py::test_set", "tests/test_pvl.py::test_sequence", "tests/test_pvl.py::test_sequence_backslashes", "tests/test_pvl.py::test_units", "tests/test_pvl.py::test_delimiters", "tests/test_pvl.py::test_cube_label", "tests/test_pvl.py::test_cube_label_r", "tests/test_pvl.py::test_pds3_sample_image", "tests/test_pvl.py::test_load_all_sample_labels", "tests/test_pvl.py::test_unicode", "tests/test_pvl.py::test_bytes", "tests/test_pvl.py::test_end_comment", "tests/test_pvl.py::test_parse_error", "tests/test_pvl.py::test_broken_labels[broken1.lbl-expected0-expected_errors0]", "tests/test_pvl.py::test_broken_labels[broken2.lbl-expected1-expected_errors1]", "tests/test_pvl.py::test_broken_labels[broken3.lbl-expected2-expected_errors2]", "tests/test_pvl.py::test_broken_labels[broken4.lbl-expected3-expected_errors3]", "tests/test_pvl.py::test_broken_labels[broken5.lbl-expected4-expected_errors4]", "tests/test_pvl.py::test_broken_labels[broken6.lbl-expected5-expected_errors5]", "tests/test_pvl.py::test_broken_labels[broken7.lbl-expected6-expected_errors6]", "tests/test_pvl.py::test_broken_labels[broken8.lbl-expected7-expected_errors7]", "tests/test_pvl.py::test_broken_labels[broken9.lbl-expected8-expected_errors8]", "tests/test_pvl.py::test_broken_labels[broken10.lbl-expected9-expected_errors9]", "tests/test_pvl.py::test_broken_labels[broken11.lbl-expected10-expected_errors10]", "tests/test_pvl.py::test_broken_labels[broken12.lbl-expected11-expected_errors11]", "tests/test_pvl.py::test_broken_labels[broken13.lbl-expected12-expected_errors12]", "tests/test_pvl.py::test_broken_labels[broken14.lbl-expected13-expected_errors13]", "tests/test_pvl.py::test_broken_labels[broken15.lbl-expected14-expected_errors14]", "tests/test_pvl.py::test_broken_labels[broken16.lbl-expected15-expected_errors15]", "tests/test_pvl.py::test_broken_labels_LexerError[broken1.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken3.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken4.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken5.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken6.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken7.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken8.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken9.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken10.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken11.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken12.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken13.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken14.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken15.lbl]", "tests/test_pvl.py::test_broken_labels_LexerError[broken16.lbl]", "tests/test_pvl.py::test_broken_labels_ParseError", "tests/test_pvl.py::test_EmptyValueAtLine", "tests/test_pvl.py::test_load_all_bad_sample_labels", 
"tests/test_pvl.py::test_dump_stream", "tests/test_pvl.py::test_dump_to_file", "tests/test_pvl.py::test_default_encoder", "tests/test_pvl.py::test_pds_encoder", "tests/test_pvl.py::test_special_values", "tests/test_pvl.py::test_special_strings", "tests/test_pvl.py::test_unkown_value", "tests/test_pvl.py::test_quoated_strings", "tests/test_pvl.py::test_dump_to_file_insert_before", "tests/test_pvl.py::test_dump_to_file_insert_after" ]
{ "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-12-05 04:03:54+00:00
bsd-3-clause
4,576
planetarypy__pvl-83
diff --git a/HISTORY.rst b/HISTORY.rst index 8641dfd..230387b 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -41,9 +41,24 @@ Added milisecond time precision (not microsecond as ODL allows), and does not allow times with a +HH:MM timezone specifier. It does assume any time without a timezone specifier is a UTC time. +* Added a ``real_cls`` parameter to the decoder classes, so that users can specify + an arbitrary type with which real numbers in the PVL-text could be returned in + the dict-like from the loaders (defaults to ``float`` as you'd expect). +* The encoders now support a broader range of real types to complement the decoders. + +Changed ++++++++ +* Improved some build and test functionality. +* Moved the is_identifier() static function from the ODLEncoder to the ODLDecoder + where it probably should have always been. + Fixed +++++ +* Very long Python ``str`` objects that otherwise qualified as ODL/PDS3 Symbol Strings, + would get written out with single-quotes, but they would then be split across lines + via the formatter, so they should be written as Text Strings with double-quotes. + Better protections have been put in place. * pvl.decoder.ODLDecoder now will return both "aware" and "naive" datetime objects (as appropriate) since "local" times without a timezone are allowed under ODL. @@ -58,13 +73,6 @@ Fixed precision. -Changed -+++++++ -* Improved some build and test functionality. -* Moved the is_identifier() static function from the ODLEncoder to the ODLDecoder - where it probably should have always been. - - 1.1.0 (2020-12-04) ------------------ diff --git a/docs/parsing.rst b/docs/parsing.rst index 8805747..5b26a7d 100644 --- a/docs/parsing.rst +++ b/docs/parsing.rst @@ -258,3 +258,44 @@ This is very similar to parsing PVL text from a file, but you use Quantity(value=50.784875, units='DEG') Of course, other kinds of URLs, like file, ftp, rsync, sftp and more can be used. + + +--------------------------- +Return non-standard objects +--------------------------- + +The "loaders" return a dict-like filled with Python objects based on the types inferred from the +PVL-text. Sometimes you may want the `pvl` library to return different types in the dict-like, +and `pvl` has some limited capacity for that (so far just real and quantity types). + +Normally real number values in the PVL-text will be returned as Python :class:`float` objects. +However, what if you wanted all of the real values to be returned in the dict-like as Python +:class:`decimal.Decimal` objects (because you wanted to preserve numeric precision)? You can +do that by providing the object type you want via the ``real_cls`` argument of a decoder constructor, +like so:: + + >>> from decimal import Decimal + >>> import pvl + >>> text = "gigawatts = 1.210" + >>> + >>> flo = pvl.loads(text) + >>> print(flo) + PVLModule([ + ('gigawatts', 1.21) + ]) + >>> + >>> print(type(flo["gigawatts"])) + <class 'float'> + >>> dec = pvl.loads(text, decoder=pvl.decoder.OmniDecoder(real_cls=Decimal)) + >>> print(dec) + PVLModule([ + ('gigawatts', Decimal('1.210')) + ]) + >>> print(type(dec["gigawatts"])) + <class 'decimal.Decimal'> + +Any class that can be passed a :class:`str` object to initialize an object can be provided to +``real_cls``, but it should emit a :class:`ValueError` if it is given a string that should not +be converted to a real number value. + +To learn more about quantity classes in `pvl`, please see :ref:`quantities`. 
\ No newline at end of file diff --git a/docs/quantities.rst b/docs/quantities.rst index 1ae2f19..edf24d7 100644 --- a/docs/quantities.rst +++ b/docs/quantities.rst @@ -1,3 +1,5 @@ +.. _quantities: + ============================ Quantities: Values and Units ============================ diff --git a/pvl/decoder.py b/pvl/decoder.py index 22382b7..c98dd86 100644 --- a/pvl/decoder.py +++ b/pvl/decoder.py @@ -9,7 +9,7 @@ referred to as the Blue Book with a date of June 2000. A decoder deals with converting strings given to it (typically by the parser) to the appropriate Python type. """ -# Copyright 2015, 2017, 2019-2020, ``pvl`` library authors. +# Copyright 2015, 2017, 2019-2021, ``pvl`` library authors. # # Reuse is permitted under the terms of the license. # The AUTHORS file and the LICENSE file are at the @@ -17,6 +17,7 @@ by the parser) to the appropriate Python type. import re from datetime import datetime, timedelta, timezone +from decimal import InvalidOperation from itertools import repeat, chain from warnings import warn @@ -56,9 +57,12 @@ class PVLDecoder(object): :param quantity_cls: defaults to :class:`pvl.collections.Quantity`, but could be any class object that takes two arguments, where the first is the value, and the second is the units value. + + :param real_cls: defaults to :class:`float`, but could be any class object + that can be constructed from a `str` object. """ - def __init__(self, grammar=None, quantity_cls=None): + def __init__(self, grammar=None, quantity_cls=None, real_cls=None): self.errors = [] if grammar is None: @@ -73,6 +77,11 @@ class PVLDecoder(object): else: self.quantity_cls = quantity_cls + if real_cls is None: + self.real_cls = float + else: + self.real_cls = real_cls + def decode(self, value: str): """Returns a Python object based on *value*.""" return self.decode_simple_value(value) @@ -83,6 +92,15 @@ class PVLDecoder(object): <Simple-Value> ::= (<Date-Time> | <Numeric> | <String>) """ + if value.casefold() == self.grammar.none_keyword.casefold(): + return None + + if value.casefold() == self.grammar.true_keyword.casefold(): + return True + + if value.casefold() == self.grammar.false_keyword.casefold(): + return False + for d in ( self.decode_quoted_string, self.decode_non_decimal, @@ -94,15 +112,6 @@ class PVLDecoder(object): except ValueError: pass - if value.casefold() == self.grammar.none_keyword.casefold(): - return None - - if value.casefold() == self.grammar.true_keyword.casefold(): - return True - - if value.casefold() == self.grammar.false_keyword.casefold(): - return False - return self.decode_unquoted_string(value) def decode_unquoted_string(self, value: str) -> str: @@ -160,16 +169,18 @@ class PVLDecoder(object): return str(value[1:-1]) raise ValueError(f'The object "{value}" is not a PVL Quoted String.') - @staticmethod - def decode_decimal(value: str): - """Returns a Python ``int`` or ``float`` as appropriate + def decode_decimal(self, value: str): + """Returns a Python ``int`` or ``self.real_cls`` object, as appropriate based on *value*. Raises a ValueError otherwise. """ - # Returns int or float + # Returns int or real_cls try: return int(value, base=10) except ValueError: - return float(value) + try: + return self.real_cls(str(value)) + except InvalidOperation as err: + raise ValueError from err def decode_non_decimal(self, value: str) -> int: """Returns a Python ``int`` as decoded from *value* @@ -294,13 +305,17 @@ class ODLDecoder(PVLDecoder): default to an ODLGrammar() object. 
""" - def __init__(self, grammar=None, quantity_cls=None): + def __init__(self, grammar=None, quantity_cls=None, real_cls=None): self.errors = [] if grammar is None: - super().__init__(grammar=ODLGrammar(), quantity_cls=quantity_cls) - else: - super().__init__(grammar=grammar, quantity_cls=quantity_cls) + grammar = ODLGrammar() + + super().__init__( + grammar=grammar, + quantity_cls=quantity_cls, + real_cls=real_cls + ) def decode_datetime(self, value: str): """Extends parent function to also deal with datetimes diff --git a/pvl/encoder.py b/pvl/encoder.py index 5d8118b..8198fe5 100644 --- a/pvl/encoder.py +++ b/pvl/encoder.py @@ -5,7 +5,7 @@ An encoder deals with converting Python objects into string values that conform to a PVL specification. """ -# Copyright 2015, 2019-2020, ``pvl`` library authors. +# Copyright 2015, 2019-2021, ``pvl`` library authors. # # Reuse is permitted under the terms of the license. # The AUTHORS file and the LICENSE file are at the @@ -16,6 +16,7 @@ import re import textwrap from collections import abc, namedtuple +from decimal import Decimal from warnings import warn from .collections import PVLObject, PVLGroup, Quantity @@ -126,6 +127,9 @@ class PVLEncoder(object): f"group_class ({group_class})." ) + # Finally, let's keep track of everything we consider "numerical": + self.numeric_types = (int, float, self.decoder.real_cls, Decimal) + def _import_quantities(self): warn_str = ( "The {} library is not present, so {} objects will " @@ -382,8 +386,8 @@ class PVLEncoder(object): return self.grammar.true_keyword else: return self.grammar.false_keyword - elif isinstance(value, (int, float)): - return repr(value) + elif isinstance(value, self.numeric_types): + return str(value) elif isinstance(value, str): return self.encode_string(value) else: @@ -561,8 +565,8 @@ class ODLEncoder(PVLEncoder): For Python, these correspond to the following: - * numeric_value: int, float, and Quantity whose value - is int or float + * numeric_value: any of self.numeric_types, and Quantity whose value + is one of the self.numeric_types. * date_time_string: datetime objects * text_string_value: str * symbol_value: str @@ -570,13 +574,19 @@ class ODLEncoder(PVLEncoder): """ for quant in self.quantities: if isinstance(value, quant.cls): - if isinstance(getattr(value, quant.value_prop), (int, float)): + if isinstance( + getattr(value, quant.value_prop), self.numeric_types + ): return True - if isinstance( - value, - (int, float, datetime.date, datetime.datetime, datetime.time, str), - ): + scalar_types = ( + *self.numeric_types, + datetime.date, + datetime.datetime, + datetime.time, + str + ) + if isinstance(value, scalar_types): return True return False @@ -602,6 +612,15 @@ class ODLEncoder(PVLEncoder): if fe in value: return False + if len(value) > self.width / 2: + # This means that the string is long and it is very + # likely to get wrapped and have carriage returns, + # and thus "ODL Format Effectors" inserted later. + # Unfortunately, without knowing the width of the + # parameter term, and the current indent level, this + # still may end up being incorrect threshhold. + return False + if value.isprintable() and len(value) > 0: # Item 3 return True else: @@ -711,7 +730,10 @@ class ODLEncoder(PVLEncoder): """ for quant in self.quantities: if isinstance(value, quant.cls): - if isinstance(getattr(value, quant.value_prop), (int, float)): + if isinstance( + getattr(value, quant.value_prop), + self.numeric_types + ): return super().encode_value(value) else: raise ValueError(
planetarypy/pvl
b64e4afcc51ee97ca2d0e912b3f6d07fb7428779
diff --git a/tests/test_decoder.py b/tests/test_decoder.py index 8caa1cf..2d6f492 100644 --- a/tests/test_decoder.py +++ b/tests/test_decoder.py @@ -18,6 +18,7 @@ import datetime import itertools import unittest +from decimal import Decimal from pvl.decoder import PVLDecoder, ODLDecoder, PDSLabelDecoder, for_try_except from pvl.collections import Quantity @@ -100,6 +101,13 @@ class TestDecoder(unittest.TestCase): with self.subTest(string=s): self.assertRaises(ValueError, self.d.decode_decimal, s) + def test_decode_withDecimal(self): + d = PVLDecoder(real_cls=Decimal) + s = "123.450" + self.assertEqual(d.decode_decimal(s), Decimal(s)) + + self.assertRaises(ValueError, d.decode_decimal, "fruit") + def test_decode_non_decimal(self): for p in ( ("2#0101#", 5), diff --git a/tests/test_encoder.py b/tests/test_encoder.py index d7a3ff0..864d3e2 100644 --- a/tests/test_encoder.py +++ b/tests/test_encoder.py @@ -2,6 +2,7 @@ import datetime import unittest +from decimal import Decimal from pvl.encoder import PVLEncoder, ODLEncoder, PDSLabelEncoder from pvl.collections import Quantity, PVLModule, PVLGroup, PVLObject @@ -84,6 +85,7 @@ class TestEncoder(unittest.TestCase): (True, "TRUE"), (1.23, "1.23"), (42, "42"), + (Decimal("12.30"), "12.30"), ("ABC", "ABC"), ) for p in pairs:
Retain precision on PVL Real Numbers

Sometimes "PVL Real Numbers" like 1234.560 are provided in PVL-text. The current `pvl` library reads such a "PVL Real Number" during `pvl.load()` and converts it to a Python `float`. If that value is then written back out to PVL-text by `pvl.dump()`, it will be written as 1234.56. While the two values are numerically equivalent, the number has lost some "representational precision" that may be significant. The PVL/ODL/PDS3 specifications don't care about this, but people do.

Likewise, floating point arithmetic can result in Python `float` objects which have an unreasonable precision that would get written to output PVL-text. For example, 1.1 + 2.2 via a binary floating point addition could display as 3.3000000000000003. This is generally handled in Python during conversion of individual `float` objects to `str` objects by string formatting, but since many such numbers with different levels of needed precision could be written out when a dict-like is handed to `pvl.dump()`, specifying a one-size-fits-all precision for a whole `pvl.dump()` run is unreasonable.

**Describe the solution you'd like**
If "PVL Real Numbers" were decoded to Python `decimal.Decimal` objects instead of `float` objects, they could retain their precision. The `pvl` decoder classes currently have a `quantity_cls` parameter on instantiation that instructs the decoder what kind of quantity class should be used to decode a PVL numeric value with associated units, and specifies the kind of objects returned in the dict-like that match that kind of PVL-text. It seems like we could create a similar `real_cls` parameter for decoders that would default to Python `float`, but for which Python `decimal.Decimal` could be used (or anything else that could take a text representation of a PVL Real Number and convert it to some Python object). This would return `Decimal` objects in the dict-like, and then when a dict-like with such objects is written out via `pvl.dump()`, it could retain the relevant precision. Such a mechanism would also allow programmers to use various operations on the `Decimal` objects to specify individual amounts of precision for different values that would then be used when the dict-like is written out via `pvl.dump()`.

**Describe alternatives you've considered**
An alternative would be to have some mechanism for just keeping all parsed values as Python `str` so that they "stay the same" during processing, but then extra work would need to be done on output during `pvl.dump()` to properly convert all of the Python `str` objects, which aren't all really PVL Text Strings, to the appropriate representation. Ironically, it occurs to me that you could provide Python `str` to the hypothetical `real_cls` attribute described above to do exactly that during the `pvl.load()` step, but then you would have to perhaps write a custom encoder that would make sure to take these strings-that-are-really-floats and serialize them to PVL-text that didn't enclose them in quotation marks.
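This is essentially what landed; the example below is adapted from the documentation added in the accompanying patch (`docs/parsing.rst`):

```python
from decimal import Decimal

import pvl

text = "gigawatts = 1.210"

# Default behavior: real values come back as Python floats, so the
# trailing zero's "representational precision" is lost.
flo = pvl.loads(text)
assert flo["gigawatts"] == 1.21

# With real_cls=Decimal, the precision of the PVL-text is retained,
# and the encoder changes in this patch write Decimal objects back
# out with str(), preserving "1.210" on dump.
dec = pvl.loads(text, decoder=pvl.decoder.OmniDecoder(real_cls=Decimal))
assert dec["gigawatts"] == Decimal("1.210")
assert str(dec["gigawatts"]) == "1.210"
```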
0.0
b64e4afcc51ee97ca2d0e912b3f6d07fb7428779
[ "tests/test_decoder.py::TestDecoder::test_decode_withDecimal", "tests/test_encoder.py::TestEncoder::test_encode_simple_value" ]
[ "tests/test_decoder.py::TestForTryExcept::test_for_try_except", "tests/test_decoder.py::TestDecoder::test_decode_datetime", "tests/test_decoder.py::TestDecoder::test_decode_decimal", "tests/test_decoder.py::TestDecoder::test_decode_non_decimal", "tests/test_decoder.py::TestDecoder::test_decode_quantity", "tests/test_decoder.py::TestDecoder::test_decode_quoted_string", "tests/test_decoder.py::TestDecoder::test_decode_simple_value", "tests/test_decoder.py::TestDecoder::test_decode_unquoted_string", "tests/test_decoder.py::TestODLDecoder::test_decode_datetime", "tests/test_decoder.py::TestPDS3Decoder::test_decode_datetime", "tests/test_encoder.py::TestEncoder::test_encode", "tests/test_encoder.py::TestEncoder::test_encode_aggregation_block", "tests/test_encoder.py::TestEncoder::test_encode_assignment", "tests/test_encoder.py::TestEncoder::test_encode_date", "tests/test_encoder.py::TestEncoder::test_encode_datetime", "tests/test_encoder.py::TestEncoder::test_encode_module", "tests/test_encoder.py::TestEncoder::test_encode_quantity", "tests/test_encoder.py::TestEncoder::test_encode_sequence", "tests/test_encoder.py::TestEncoder::test_encode_set", "tests/test_encoder.py::TestEncoder::test_encode_string", "tests/test_encoder.py::TestEncoder::test_encode_time", "tests/test_encoder.py::TestEncoder::test_encode_value", "tests/test_encoder.py::TestEncoder::test_format", "tests/test_encoder.py::TestODLEncoder::test_encode_quantity", "tests/test_encoder.py::TestODLEncoder::test_encode_time", "tests/test_encoder.py::TestODLEncoder::test_is_scalar", "tests/test_encoder.py::TestPDSLabelEncoder::test_convert_grp_to_obj", "tests/test_encoder.py::TestPDSLabelEncoder::test_count_aggs", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode_aggregation_block", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode_set", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode_time", "tests/test_encoder.py::TestPDSLabelEncoder::test_is_PDSgroup" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-03-16 23:20:26+00:00
bsd-3-clause
4,577
planetarypy__pvl-85
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 5312514..f13d83c 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -62,8 +62,12 @@ Ready to contribute? Here's how to set up `pvl` for local development. If you are a conda user:: $ cd pvl/ - $ conda env create -n pvl -f environment.yml + $ conda env create -n pvldev -f environment.yml + $ conda activate pvldev + $ pip install --no-deps -e . + The last `pip install` installs pvl in "editable" mode which facilitates + testing. 4. Create a branch for local development:: diff --git a/README.rst b/README.rst index cd56190..3710f1c 100644 --- a/README.rst +++ b/README.rst @@ -35,7 +35,7 @@ Python implementation of a PVL (Parameter Value Language) library. * `PlanetaryPy`_ Affiliate Package. PVL is a markup language, similar to XML, commonly employed for -entries in the Planetary Database System used by NASA to store +entries in the Planetary Data System used by NASA to archive mission data, among other uses. This package supports both encoding and decoding a variety of PVL 'flavors' including PVL itself, ODL, `NASA PDS 3 Labels`_, and `USGS ISIS Cube Labels`_. diff --git a/pvl/encoder.py b/pvl/encoder.py index 8198fe5..4c8a916 100644 --- a/pvl/encoder.py +++ b/pvl/encoder.py @@ -825,20 +825,34 @@ class PDSLabelEncoder(ODLEncoder): You are not allowed to chose *end_delimiter* or *newline* as the parent class allows, because to be PDS-compliant, - those are fixed choices. - - In PVL and ODL, the OBJECT and GROUP aggregations are - interchangable, but the PDS applies restrictions to what can - appear in a GROUP. If *convert_group_to_object* is True, - and a GROUP does not conform to the PDS definition of a GROUP, - then it will be written out as an OBJECT. If it is False, - then an exception will be thrown if incompatible GROUPs are - encountered. - - *tab_replace* should indicate the number of space characters - to replace horizontal tab characters with (since tabs aren't - allowed in PDS labels). If this is set to zero, tabs will not - be replaced with spaces. Defaults to 4. + those are fixed choices. However, in some cases, the PDS3 + Standards are asymmetric, allowing for a wider variety of + PVL-text on "read" and a more narrow variety of PVL-text + on "write". The default values of the PDSLabelEncoder enforce + those strict "write" rules, but if you wish to alter them, + but still produce PVL-text that would validate against the PDS3 + standard, you may alter them. + + :param convert_group_to_object: Defaults to True, meaning that + if a GROUP does not conform to the PDS definition of a + GROUP, then it will be written out as an OBJECT. If it is + False, then an exception will be thrown if incompatible + GROUPs are encountered. In PVL and ODL, the OBJECT and GROUP + aggregations are interchangeable, but the PDS applies + restrictions to what can appear in a GROUP. + :param tab_replace: Defaults to 4 and indicates the number of + space characters to replace horizontal tab characters with + (since tabs aren't allowed in PDS labels). If this is set + to zero, tabs will not be replaced with spaces. + :param symbol_single_quotes: Defaults to True, and if a Python `str` + object qualifies as a PVL Symbol String, it will be written to + PVL-text as a single-quoted string. If False, no special + handling is done, and any PVL Symbol String will be treated + as a PVL Text String, which is typically enclosed with double-quotes. 
+ :param time_trailing_z: defaults to True, and suffixes a "Z" to + datetimes and times written to PVL-text as the PDS encoding + standard requires. If False, no trailing "Z" is written. + """ def __init__( @@ -850,6 +864,8 @@ class PDSLabelEncoder(ODLEncoder): aggregation_end=True, convert_group_to_object=True, tab_replace=4, + symbol_single_quote=True, + time_trailing_z=True, ): if grammar is None: @@ -870,6 +886,8 @@ class PDSLabelEncoder(ODLEncoder): self.convert_group_to_object = convert_group_to_object self.tab_replace = tab_replace + self.symbol_single_quote = symbol_single_quote + self.time_trailing_z = time_trailing_z def count_aggs( self, module: abc.Mapping, obj_count: int = 0, grp_count: int = 0 @@ -1042,6 +1060,18 @@ class PDSLabelEncoder(ODLEncoder): return super().encode_set(values) + def encode_string(self, value): + """Extends parent function to treat Symbol Strings as Text Strings, + which typically means that they are double-quoted and not + single-quoted. + """ + if self.decoder.is_identifier(value): + return value + elif self.is_symbol(value) and self.symbol_single_quote: + return "'" + value + "'" + else: + return super(ODLEncoder, self).encode_string(value) + def encode_time(self, value: datetime.time) -> str: """Overrides parent's encode_time() function because even though ODL allows for timezones, PDS does not. @@ -1070,15 +1100,18 @@ class PDSLabelEncoder(ODLEncoder): s += f":{value:%S}" if ( - value.tzinfo is not None and - value.utcoffset() != datetime.timedelta() + value.tzinfo is None or + value.tzinfo.utcoffset(None) == datetime.timedelta(0) ): + if self.time_trailing_z: + return s + "Z" + else: + return s + else: raise ValueError( "PDS labels should only have UTC times, but " f"this time has a timezone: {value}" ) - else: - return s + "Z" class ISISEncoder(PVLEncoder):
planetarypy/pvl
cc17586a45d7a452251b9ce49de8be9cfebc0589
diff --git a/tests/test_encoder.py b/tests/test_encoder.py index 864d3e2..91669c0 100644 --- a/tests/test_encoder.py +++ b/tests/test_encoder.py @@ -283,6 +283,13 @@ END_OBJECT = key""" t = datetime.time(10, 54, 12, 123456, tzinfo=datetime.timezone.utc) self.assertRaises(ValueError, self.e.encode_time, t) + e = PDSLabelEncoder(time_trailing_z=False) + self.assertEqual("01:02", e.encode_time(datetime.time(1, 2))) + + def test_encode_string(self): + e = PDSLabelEncoder(symbol_single_quote=False) + self.assertEqual('"AB CD"', e.encode_string('AB CD')) + def test_encode(self): m = PVLModule(a=PVLGroup(g1=2, g2=3.4), b="c")
We probably need a variant PDS3LabelEncoder().

The various PVL "flavors" sometimes have asymmetric rules for "reading" and "writing." They are generally more permissive on "read" and strict on "write." For example, for PDS3 Labels:

PDS3 Reads | `pvl` converts to Python | PDS3 Writes
-------------|-------------------------|-------------
2000-02-25T10:54:12.129Z | `datetime` | 2000-02-25T10:54:12.129Z
2000-02-25T10:54:12.129 | `datetime` | 2000-02-25T10:54:12.129Z
'I am a Symbol String' | `str` | 'I am a Symbol String'
"I'm a Text String" | `str` | "I'm a Text String"
"I am double-quoted" | `str` | 'I am double-quoted'

See the asymmetry? PDS3 Label "readers" can read times without any kind of timezone specifier, or with the "Z" specifier, but PDS3 Label "writers" must write out the trailing "Z" (see #82 for too much information). Similarly, did you know that ODL/PDS3 considers single-quoted strings as 'Symbol Strings' with restrictions on what can be inside them, and double-quoted "Text Strings" which can contain anything? So on "write" our `pvl.encoder.PDS3LabelEncoder` sees if a Python `str` qualifies as a 'Symbol String' and writes it out with single quotes, and as a double-quoted "Text String" otherwise.

If a user wanted to get some PVL-text that could be read by a PDS3 "reader" but wanted no trailing "Z", or wanted some text that qualified as a Symbol String to be double-quoted, we don't have a good mechanism for that, beyond them extending our `PDS3LabelEncoder()`.

**Describe the solution you'd like**
I think we should keep the `pvl.encoder.PDS3LabelEncoder()` as it is, meaning that it is very strict. However, we should create a `pvl.encoder.PDS3VariantEncoder()` that allows users to specify which of the "optional" formatting they'd like to allow, but still write out PVL-text that is "readable" by our `pvl.decoder.PDS3Decoder()`.

**Describe alternatives you've considered**
I thought about simply putting optional parameters in the existing `pvl.encoder.PDS3LabelEncoder()` that would modify the behavior; however, while users may have an option, the ODL/PDS3 spec on writing isn't really optional. So I'd rather keep the `pvl.encoder.PDS3LabelEncoder()` very strict and immutable, but still provide this other encoder.

This also made me think more about allowing more "smart" types that we decode PVL-text to that "remember" the PVL-text that they came from (as Michael suggested in #81). If the library could determine whether there was or wasn't a "Z" on the text that became a `datetime`, then it could write it back out the same way. Similarly with the different string quoting patterns. That's all still valid, but this question of what to "allow" on output would still remain.

**Additional context**
The context here is that there are many bits of software out there that "read" PVL-text, and they do not all fully implement the PDS3 "reader" spec. So someone could properly use the PDS3 spec to create a time with a trailing "Z", which would validate and be archived with the PDS, but software that "thinks" it works on PDS3 PVL-text might barf on the "Z" because it isn't properly implementing the "reading" of PDS3 labels. However, if someone didn't properly use the PDS3 spec and wrote times without the trailing "Z", those labels **would still validate and be archived**, because the validators are "readers" that can read both (stupid asymmetric standard).

In practice, this has happened quite a bit, and if a developer was making some software by just looking at a corpus of PVL-text (and not closely reading the ODL/PDS3 spec), that software may not handle the "Z". As always, there's the spec, and then there's how people use things in practice. Having this variant encoder would help us support a wider variety of the ways that people are practically using PDS3 PVL-text, while still being "readable" by official PDS3 "readers."
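Note that the accompanying patch implements this not as a separate `PDS3VariantEncoder()`, but as opt-in parameters on `PDSLabelEncoder`; the sketch below mirrors the new tests:

```python
import datetime

from pvl.encoder import PDSLabelEncoder

# Strict default: PDS3 "write" rules apply, so times get the
# trailing "Z" and qualifying strings become single-quoted
# Symbol Strings.
strict = PDSLabelEncoder()
assert strict.encode_time(datetime.time(1, 2)) == "01:02Z"

# The variant knobs relax the write rules while still producing
# PVL-text that PDS3 "readers" can read.
relaxed = PDSLabelEncoder(time_trailing_z=False, symbol_single_quote=False)
assert relaxed.encode_time(datetime.time(1, 2)) == "01:02"
assert relaxed.encode_string("AB CD") == '"AB CD"'
```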
0.0
cc17586a45d7a452251b9ce49de8be9cfebc0589
[ "tests/test_encoder.py::TestPDSLabelEncoder::test_encode_string", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode_time" ]
[ "tests/test_encoder.py::TestEncoder::test_encode", "tests/test_encoder.py::TestEncoder::test_encode_aggregation_block", "tests/test_encoder.py::TestEncoder::test_encode_assignment", "tests/test_encoder.py::TestEncoder::test_encode_date", "tests/test_encoder.py::TestEncoder::test_encode_datetime", "tests/test_encoder.py::TestEncoder::test_encode_module", "tests/test_encoder.py::TestEncoder::test_encode_quantity", "tests/test_encoder.py::TestEncoder::test_encode_sequence", "tests/test_encoder.py::TestEncoder::test_encode_set", "tests/test_encoder.py::TestEncoder::test_encode_simple_value", "tests/test_encoder.py::TestEncoder::test_encode_string", "tests/test_encoder.py::TestEncoder::test_encode_time", "tests/test_encoder.py::TestEncoder::test_encode_value", "tests/test_encoder.py::TestEncoder::test_format", "tests/test_encoder.py::TestODLEncoder::test_encode_quantity", "tests/test_encoder.py::TestODLEncoder::test_encode_time", "tests/test_encoder.py::TestODLEncoder::test_is_scalar", "tests/test_encoder.py::TestPDSLabelEncoder::test_convert_grp_to_obj", "tests/test_encoder.py::TestPDSLabelEncoder::test_count_aggs", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode_aggregation_block", "tests/test_encoder.py::TestPDSLabelEncoder::test_encode_set", "tests/test_encoder.py::TestPDSLabelEncoder::test_is_PDSgroup" ]
{ "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-03-20 18:46:24+00:00
bsd-3-clause
4,578
planetarypy__pvl-92
diff --git a/HISTORY.rst b/HISTORY.rst index 0303c97..51e2522 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -30,6 +30,13 @@ and the release date, in year-month-day format (see examples below). Unreleased ---------- +Added ++++++ +* pvl.collections.Quantity objects now have __int__() and __float__() functions that + will return the int and float versions of their .value parameter to facilitate numeric + operations with Quantity objects (Issue 91). + + 1.2.1 (2021-05-31) ------------------ diff --git a/pvl/collections.py b/pvl/collections.py index 5679915..5eebdbd 100644 --- a/pvl/collections.py +++ b/pvl/collections.py @@ -692,7 +692,11 @@ class Quantity(namedtuple("Quantity", ["value", "units"])): for how to use 3rd party Quantity objects with pvl. """ - pass + def __int__(self): + return int(self.value) + + def __float__(self): + return float(self.value) class Units(Quantity):
planetarypy/pvl
4f3b28e338d4f6c0b4963cb7b1e4234c49076203
diff --git a/tests/test_collections.py b/tests/test_collections.py index 61b5126..b7cb3e7 100644 --- a/tests/test_collections.py +++ b/tests/test_collections.py @@ -880,4 +880,28 @@ class TestMultiDict(unittest.TestCase): ) except ImportError: - pass \ No newline at end of file + pass + + +class TestQuantity(unittest.TestCase): + + def setUp(self): + self.d = pvl.loads("a = 2 <m> b = 1.21 <gW> c = nine <planets>") + + def test_int(self): + a = int(self.d["a"]) + self.assertEqual(2, a) + + b = int(self.d["b"]) + self.assertEqual(1, b) + + self.assertRaises(ValueError, int, self.d["c"]) + + def test_float(self): + a = float(self.d["a"]) + self.assertEqual(2.0, a) + + b = float(self.d["b"]) + self.assertEqual(1.21, b) + + self.assertRaises(ValueError, float, self.d["c"])
Provide some additional functions for pvl.collections.Quantity objects.

**Is your feature request related to a problem? Please describe.**
Sometimes a caller will expect a particular key in the returned object, and will expect a numeric value, but won't know whether the value is a bare numeric value (of type float or int) or a Quantity object; in any case they won't care about the unit, and are only after the numeric value. In this case, they need to perform an `isinstance()` test to know whether they can use the value as-is, or if they need to access the `.value` attribute.

**Describe the solution you'd like**
Implement `__float__()` and `__int__()` functions in the `pvl.collections.Quantity` class, so that a caller can just call `float(value)` or `int(value)` to get the contents of `Quantity.value` as needed, or a harmless conversion of a float to a float (if the value isn't a Quantity object), for example.

**Describe alternatives you've considered**
The alternate solution is to do nothing and continue to force type-testing behavior on users, but this results in more complex downstream code. This simple change would streamline certain use cases.
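A sketch of the proposed usage, matching the tests added in the accompanying patch:

```python
import pvl

label = pvl.loads("a = 2 <m> b = 1.21 <gW>")

# Quantity objects now convert directly; no isinstance() check or
# explicit .value access is required first.
assert int(label["a"]) == 2
assert float(label["b"]) == 1.21

# int() truncates a real-valued Quantity, just as it does a bare float.
assert int(label["b"]) == 1

# Bare numeric values behave identically under float()/int(), so
# callers can treat both cases uniformly.
bare = pvl.loads("c = 3.5")
assert float(bare["c"]) == 3.5
```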
0.0
4f3b28e338d4f6c0b4963cb7b1e4234c49076203
[ "tests/test_collections.py::TestQuantity::test_float", "tests/test_collections.py::TestQuantity::test_int" ]
[ "tests/test_collections.py::TestClasses::test_KeysView", "tests/test_collections.py::TestClasses::test_MappingView", "tests/test_collections.py::TestClasses::test_MutableMappingSequence", "tests/test_collections.py::TestClasses::test_ValuesView", "tests/test_collections.py::TestMultiDicts::test_append", "tests/test_collections.py::TestMultiDicts::test_clear", "tests/test_collections.py::TestMultiDicts::test_copy", "tests/test_collections.py::TestMultiDicts::test_delete", "tests/test_collections.py::TestMultiDicts::test_dict_creation", "tests/test_collections.py::TestMultiDicts::test_empty", "tests/test_collections.py::TestMultiDicts::test_equality", "tests/test_collections.py::TestMultiDicts::test_index_access", "tests/test_collections.py::TestMultiDicts::test_insert", "tests/test_collections.py::TestMultiDicts::test_insert_after", "tests/test_collections.py::TestMultiDicts::test_insert_before", "tests/test_collections.py::TestMultiDicts::test_insert_before_after_raises", "tests/test_collections.py::TestMultiDicts::test_iterators", "tests/test_collections.py::TestMultiDicts::test_key_access", "tests/test_collections.py::TestMultiDicts::test_key_index", "tests/test_collections.py::TestMultiDicts::test_keyword_creation", "tests/test_collections.py::TestMultiDicts::test_len", "tests/test_collections.py::TestMultiDicts::test_list_creation", "tests/test_collections.py::TestMultiDicts::test_pop_noarg", "tests/test_collections.py::TestMultiDicts::test_repr", "tests/test_collections.py::TestMultiDicts::test_set", "tests/test_collections.py::TestMultiDicts::test_slice_access", "tests/test_collections.py::TestMultiDicts::test_update", "tests/test_collections.py::TestDifferences::test_as_list", "tests/test_collections.py::TestDifferences::test_conversion", "tests/test_collections.py::TestDifferences::test_discard", "tests/test_collections.py::TestDifferences::test_equality", "tests/test_collections.py::TestDifferences::test_pop", "tests/test_collections.py::TestDifferences::test_popitem", "tests/test_collections.py::TestDifferences::test_py3_items", "tests/test_collections.py::TestDifferences::test_repr", "tests/test_collections.py::TestMultiDict::test_insert", "tests/test_collections.py::TestMultiDict::test_repr", "tests/test_collections.py::TestMultiDict::test_str" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2021-07-28 01:21:57+00:00
bsd-3-clause
4,579
planetarypy__pvl-96
diff --git a/HISTORY.rst b/HISTORY.rst index fb9ec0e..266e002 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -10,7 +10,7 @@ The format is based on `Keep a Changelog <https://keepachangelog.com/en/1.0.0/>` and this project adheres to `Semantic Versioning <https://semver.org/spec/v2.0.0.html>`_. When updating this file, please add an entry for your change under -Unreleased_ and one of the following headings: +`Not Yet Released`_ and one of the following headings: - Added - for new features. - Changed - for changes in existing functionality. @@ -27,8 +27,8 @@ underlined with dashes under Unreleased_ with the version number and the release date, in year-month-day format (see examples below). -Unreleased ----------- +Not Yet Released +---------------- Added +++++ @@ -39,7 +39,7 @@ Added * pvl.load() now has an `encoding=` parameter that is identical in usage to the parameter passed to `open()`, and will attempt to decode the whole file as if it had been encoded thusly. If it encounters a decoding error, - it will fall back to decoding the bytes one at a time as ASCII text. (Issue 93) + it will fall back to decoding the bytes one at a time as ASCII text (Issue 93). Fixed +++++ @@ -49,7 +49,11 @@ Fixed so that if there are weird UTF characters in the PVL-text, you'll get those weird UTF characters in the returned dict-like. When the stricter PVL, ODL, or PDS3 dialects are used to "load" PVL-text, - they will properly fail to parse this text. (Issue 93). + they will properly fail to parse this text (Issue 93). +* Empty parameters inside groups or objects (but not at the end), would + cause the default "Omni" parsing strategy to go into an infinite + loop. Empty parameters in PVL, ODL, and PDS3 continue to not be + allowed (Issue 95). 1.2.1 (2021-05-31) diff --git a/README.rst b/README.rst index 3206cfa..534a36b 100644 --- a/README.rst +++ b/README.rst @@ -162,7 +162,7 @@ with ``dict``-style access:: ... ]) >>> print(module['foo']) bar - >>> print(module.getlist('foo')) + >>> print(module.getall('foo')) ['bar', 'remember me?'] >>> print(module.items()) ItemsView(PVLModule([ diff --git a/pvl/collections.py b/pvl/collections.py index 5eebdbd..4687f2b 100644 --- a/pvl/collections.py +++ b/pvl/collections.py @@ -305,6 +305,21 @@ class OrderedMultiDict(dict, MutableMappingSequence): def pop(self, *args, **kwargs): """Removes all items with the specified *key*.""" + if len(args) == 0 and len(kwargs) == 0: + if not self: + raise KeyError( + "pop(): {!s} ".format(type(self).__name__) + "is empty" + ) + + key, _ = item = self.__items.pop() + values = dict_getitem(self, key) + values.pop() + + if not values: + dict_delitem(self, key) + + return item + warnings.warn( "The pop(k) function removes " "all keys with value k to remain backwards compatible with the " @@ -314,9 +329,6 @@ class OrderedMultiDict(dict, MutableMappingSequence): FutureWarning, ) - if len(args) == 0 and len(kwargs) == 0: - return self.popitem() - return self.popall(*args, *kwargs) def popitem(self): @@ -329,22 +341,7 @@ class OrderedMultiDict(dict, MutableMappingSequence): "Consider using the list-like .pop(), without an argument instead.", FutureWarning, ) - # Yes, I know .pop() without an argument just redirects here, but it - # won't always. 
- - if not self: - raise KeyError( - "popitem(): {!s} ".format(type(self).__name__) + "is empty" - ) - - key, _ = item = self.__items.pop() - values = dict_getitem(self, key) - values.pop() - - if not values: - dict_delitem(self, key) - - return item + return self.pop() def copy(self): return type(self)(self) diff --git a/pvl/parser.py b/pvl/parser.py index bf3c04f..af34b4f 100644 --- a/pvl/parser.py +++ b/pvl/parser.py @@ -305,7 +305,7 @@ class PVLParser(object): """ raise Exception - def parse_aggregation_block(self, tokens: abc.Generator): + def parse_aggregation_block(self, tokens: abc.Generator): # noqa: C901 """Parses the tokens for an Aggregation Block, and returns the modcls object that is the result of the parsing and decoding. @@ -339,8 +339,18 @@ class PVLParser(object): # t = next(tokens) # print(f'parsing agg block, next token is: {t}') # tokens.send(t) - self.parse_end_aggregation(begin, block_name, tokens) - break + try: + self.parse_end_aggregation(begin, block_name, tokens) + break + except ValueError as ve: + try: + (agg, keep_parsing) = self.parse_module_post_hook( + agg, tokens + ) + if not keep_parsing: + raise ve + except Exception: + raise ve return block_name, agg @@ -839,7 +849,7 @@ class OmniParser(PVLParser): EmptyValueAtLine object. """ # It enables this by checking to see if the next thing is an - # '=' which means there was an empty assigment at the previous + # '=' which means there was an empty assignment at the previous # equals sign, and then unwinding the stack to give the # previous assignment the EmptyValueAtLine() object and trying # to continue parsing.
planetarypy/pvl
f27c43edf2e9635ac2b9322d5efdfb7eda52eb85
diff --git a/tests/test_parser.py b/tests/test_parser.py index 7a79c56..33b3ba6 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -394,3 +394,10 @@ class TestOmni(unittest.TestCase): PVLModule(foo="bar", weird="comments", baz="bang"), self.p.parse(some_pvl), ) + + def test_parse_aggregation_block(self): + tokens = Lexer("GROUP = name robert = bob = uncle END_GROUP") + self.assertEqual( + ("name", PVLGroup(robert="", bob="uncle")), + self.p.parse_aggregation_block(tokens) + ) \ No newline at end of file
Can't load most MER navcam labels

Initially indicated by this [pdr Issue](https://github.com/MillionConcepts/pdr/issues/19).

**To Reproduce**

Attempt to run any *pvl* loader on, or `pvl_validate` against, a file like this one: https://pds-imaging.jpl.nasa.gov/data/mer/opportunity/mer1om_0xxx/data/navcam/site0003/1nn013eff03cyl00p1652l000m2.img
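A minimal reproduction sketch, distilled from the new unit test rather than a full MER label; this assumes the default permissive "Omni" dialect, and the value decoded for the empty `robert` parameter may be an `EmptyValueAtLine` instance that compares equal to `''`:

```python
import pvl

# A group containing an empty parameter ("robert =") that is not the
# last parameter; before this fix the Omni parser looped forever on
# input like this instead of recovering and continuing.
text = """
GROUP = name
  robert =
  bob = uncle
END_GROUP
END
"""

module = pvl.loads(text)
print(module['name']['bob'])  # 'uncle'
```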
0.0
f27c43edf2e9635ac2b9322d5efdfb7eda52eb85
[ "tests/test_parser.py::TestOmni::test_parse_aggregation_block" ]
[ "tests/test_parser.py::TestParse::test_aggregation_cls", "tests/test_parser.py::TestParse::test_init_raise", "tests/test_parser.py::TestParse::test_init_wlexer", "tests/test_parser.py::TestParse::test_parse", "tests/test_parser.py::TestParse::test_parse_WSC_until", "tests/test_parser.py::TestParse::test_parse_aggregation_block", "tests/test_parser.py::TestParse::test_parse_around_equals", "tests/test_parser.py::TestParse::test_parse_assignment_statement", "tests/test_parser.py::TestParse::test_parse_begin_aggregation_statement", "tests/test_parser.py::TestParse::test_parse_end_aggregation", "tests/test_parser.py::TestParse::test_parse_end_statement", "tests/test_parser.py::TestParse::test_parse_module", "tests/test_parser.py::TestParse::test_parse_sequence", "tests/test_parser.py::TestParse::test_parse_set", "tests/test_parser.py::TestParse::test_parse_units", "tests/test_parser.py::TestParse::test_parse_value", "tests/test_parser.py::TestOmni::test_comments", "tests/test_parser.py::TestOmni::test_parse_module_post_hook" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-09-01 16:01:39+00:00
bsd-3-clause
4,580
planetlabs__planet-client-python-1014
diff --git a/planet/subscription_request.py b/planet/subscription_request.py index 648c93e..cf29914 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -13,7 +13,8 @@ # the License. """Functionality for preparing subscription requests.""" from datetime import datetime -from typing import Any, Dict, Optional, List, Mapping, Sequence +from dataclasses import dataclass, asdict +from typing import Any, Dict, Optional, List, Mapping, Sequence, Union from typing_extensions import Literal @@ -651,3 +652,63 @@ def toar_tool(scale_factor: int = 10000) -> dict: reflectances not fitting in 16bit integers. """ return _tool('toar', {'scale_factor': scale_factor}) + + +@dataclass +class FilterValue: + """Represents a filter value with optional greater than or equal to (gte) + and less than or equal to (lte) constraints. + + Attributes: + gte (Optional[float]): The minimum threshold value for the filter. + lte (Optional[float]): The maximum threshold value for the filter. + """ + + gte: Optional[float] = None + lte: Optional[float] = None + + +def cloud_filter_tool( + clear_percent: Optional[FilterValue] = None, + cloud_percent: Optional[FilterValue] = None, + shadow_percent: Optional[FilterValue] = None, + heavy_haze_percent: Optional[FilterValue] = None, + light_haze_percent: Optional[FilterValue] = None, + snow_ice_percent: Optional[FilterValue] = None, +) -> Dict[str, Dict[str, Union[float, int]]]: + """Specify a subscriptions API cloud_filter tool. + + The cloud_filter tool filters imagery after the clip tool has run and certain + metadata values have been updated to pertain to the clip AOI. This tool offers + a more detailed filtering of cloudy imagery than what can be achieved using + only catalog source filters. For instance, you might want to receive only images + that, after clipping, have a cloud_percent value of less than or equal to 25%. + + Parameters: + clear_percent: Filters for images based on the percentage of clear sky. + cloud_percent: Filters for images based on the percentage of cloud cover. + shadow_percent: Filters for images based on the percentage of shadow cover. + heavy_haze_percent: Filters for images based on the percentage of heavy haze cover. + light_haze_percent: Filters for images based on the percentage of light haze cover. + snow_ice_percent: Filters for images based on the percentage of snow or ice cover. + """ + filters = { + "clear_percent": clear_percent, + "cloud_percent": cloud_percent, + "shadow_percent": shadow_percent, + "heavy_haze_percent": heavy_haze_percent, + "light_haze_percent": light_haze_percent, + "snow_ice_percent": snow_ice_percent, + } + + result = {} + + for key, value in filters.items(): + if value: + inner_dict = asdict(value) + result[key] = { + k: v + for k, v in inner_dict.items() if v is not None + } + + return _tool("cloud_filter", result)
planetlabs/planet-client-python
8b29a9300f8a144cc56a171f102b1a068fd6b692
diff --git a/tests/unit/test_subscription_request.py b/tests/unit/test_subscription_request.py index 1b09afa..01ff195 100644 --- a/tests/unit/test_subscription_request.py +++ b/tests/unit/test_subscription_request.py @@ -407,3 +407,22 @@ def test_catalog_source_time_range_type_acquired(geom_geojson): ) assert source["parameters"]["time_range_type"] == "acquired" + + +def test_cloud_filter_tool_success(): + res = subscription_request.cloud_filter_tool( + clear_percent=subscription_request.FilterValue(gte=90), + cloud_percent=subscription_request.FilterValue(lte=10, gte=5)) + expected = { + "type": "cloud_filter", + "parameters": { + "clear_percent": { + "gte": 90 + }, + "cloud_percent": { + "lte": 10, "gte": 5 + } + } + } + + assert res == expected
Add cloud_filter tool for Subscriptions API

Make the `"cloud_filter"` tool available to use when creating a subscription.
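For example, mirroring the new unit test, the tool dictionary is built like this:

```python
from planet import subscription_request

tool = subscription_request.cloud_filter_tool(
    clear_percent=subscription_request.FilterValue(gte=90),
    cloud_percent=subscription_request.FilterValue(lte=10, gte=5),
)
# tool == {
#     "type": "cloud_filter",
#     "parameters": {
#         "clear_percent": {"gte": 90},
#         "cloud_percent": {"lte": 10, "gte": 5},
#     },
# }
```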
0.0
8b29a9300f8a144cc56a171f102b1a068fd6b692
[ "tests/unit/test_subscription_request.py::test_cloud_filter_tool_success" ]
[ "tests/unit/test_subscription_request.py::test_build_request_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_failure", "tests/unit/test_subscription_request.py::test_catalog_source_success", "tests/unit/test_subscription_request.py::test_catalog_source_featurecollection", "tests/unit/test_subscription_request.py::test_catalog_source_invalid_start_time", "tests/unit/test_subscription_request.py::test_amazon_s3_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_success", "tests/unit/test_subscription_request.py::test_notifications_success", "tests/unit/test_subscription_request.py::test_notifications_invalid_topics", "tests/unit/test_subscription_request.py::test_band_math_tool_success", "tests/unit/test_subscription_request.py::test_band_math_tool_invalid_pixel_type", "tests/unit/test_subscription_request.py::test_clip_tool_success", "tests/unit/test_subscription_request.py::test_clip_tool_invalid_type", "tests/unit/test_subscription_request.py::test_file_format_tool_success", "tests/unit/test_subscription_request.py::test_file_format_tool_invalid_format", "tests/unit/test_subscription_request.py::test_harmonize_tool_success", "tests/unit/test_subscription_request.py::test_harmonize_tool_invalid_target_sensor", "tests/unit/test_subscription_request.py::test_reproject_tool_success", "tests/unit/test_subscription_request.py::test_reproject_tool_invalid_kernel", "tests/unit/test_subscription_request.py::test_toar_tool_success", "tests/unit/test_subscription_request.py::test_pv_source_success[biomass_proxy-BIOMASS-PROXY_V3.0_10]", "tests/unit/test_subscription_request.py::test_pv_source_success[var1-VAR1-ABCD]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages0]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages1]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages2]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages3]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages4]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages5]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages6]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages7]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages8]", "tests/unit/test_subscription_request.py::test_catalog_source_time_range_type_acquired" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2023-10-13 22:40:21+00:00
apache-2.0
4,581
planetlabs__planet-client-python-1018
diff --git a/planet/subscription_request.py b/planet/subscription_request.py index 574a4ed..fd1273c 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -371,7 +371,8 @@ def _delivery(type: str, parameters: dict) -> dict: def amazon_s3(aws_access_key_id: str, aws_secret_access_key: str, bucket: str, - aws_region: str) -> dict: + aws_region: str, + path_prefix: Optional[str] = None) -> dict: """Delivery to Amazon S3. Parameters: @@ -379,6 +380,7 @@ def amazon_s3(aws_access_key_id: str, aws_secret_access_key: S3 account secret key. bucket: The name of the bucket that will receive the order output. aws_region: The region where the bucket lives in AWS. + path_prefix: Path prefix for deliveries. """ parameters = { 'aws_access_key_id': aws_access_key_id, @@ -387,13 +389,17 @@ def amazon_s3(aws_access_key_id: str, 'aws_region': aws_region, } + if path_prefix: + parameters['path_prefix'] = path_prefix + return _delivery('amazon_s3', parameters) def azure_blob_storage(account: str, container: str, sas_token: str, - storage_endpoint_suffix: Optional[str] = None) -> dict: + storage_endpoint_suffix: Optional[str] = None, + path_prefix: Optional[str] = None) -> dict: """Delivery to Azure Blob Storage. Parameters: @@ -403,6 +409,7 @@ def azure_blob_storage(account: str, without a leading '?'. storage_endpoint_suffix: Deliver order to a sovereign cloud. The default is "core.windows.net". + path_prefix: Path prefix for deliveries. """ parameters = { 'account': account, @@ -413,21 +420,30 @@ def azure_blob_storage(account: str, if storage_endpoint_suffix: parameters['storage_endpoint_suffix'] = storage_endpoint_suffix + if path_prefix: + parameters['path_prefix'] = path_prefix + return _delivery('azure_blob_storage', parameters) -def google_cloud_storage(credentials: str, bucket: str) -> dict: +def google_cloud_storage(credentials: str, + bucket: str, + path_prefix: Optional[str] = None) -> dict: """Delivery to Google Cloud Storage. Parameters: credentials: JSON-string of service account for bucket. bucket: GCS bucket name. + path_prefix: Path prefix for deliveries. """ parameters = { 'bucket': bucket, 'credentials': credentials, } + if path_prefix: + parameters['path_prefix'] = path_prefix + return _delivery('google_cloud_storage', parameters) @@ -435,7 +451,8 @@ def oracle_cloud_storage(customer_access_key_id: str, customer_secret_key: str, bucket: str, region: str, - namespace: str) -> dict: + namespace: str, + path_prefix: Optional[str] = None) -> dict: """Delivery to Oracle Cloud Storage. Parameters: @@ -444,6 +461,7 @@ def oracle_cloud_storage(customer_access_key_id: str, bucket: The name of the bucket that will receive the order output. region: The region where the bucket lives in Oracle. namespace: Object Storage namespace name. + path_prefix: Path prefix for deliveries. """ parameters = { 'customer_access_key_id': customer_access_key_id, @@ -453,6 +471,9 @@ def oracle_cloud_storage(customer_access_key_id: str, 'namespace': namespace } + if path_prefix: + parameters['path_prefix'] = path_prefix + return _delivery('oracle_cloud_storage', parameters)
planetlabs/planet-client-python
4873a4856ef3247ba6e5e5fd7949e90bddad7e11
diff --git a/tests/unit/test_subscription_request.py b/tests/unit/test_subscription_request.py index 01ff195..ef4e434 100644 --- a/tests/unit/test_subscription_request.py +++ b/tests/unit/test_subscription_request.py @@ -197,6 +197,25 @@ def test_amazon_s3_success(): } +def test_amazon_s3_path_prefix_success(): + res = subscription_request.amazon_s3(aws_access_key_id='keyid', + aws_secret_access_key='accesskey', + bucket='bucket', + aws_region='region', + path_prefix="prefix") + + assert res == { + "type": "amazon_s3", + "parameters": { + "aws_access_key_id": "keyid", + "aws_secret_access_key": "accesskey", + "bucket": "bucket", + "aws_region": "region", + "path_prefix": "prefix" + } + } + + def test_azure_blob_storage_success(): res = subscription_request.azure_blob_storage( account='act', @@ -215,6 +234,26 @@ def test_azure_blob_storage_success(): } +def test_azure_blob_storage_path_prefix_success(): + res = subscription_request.azure_blob_storage( + account='act', + container='container', + sas_token='sastoken', + storage_endpoint_suffix='suffix', + path_prefix="prefix") + + assert res == { + "type": "azure_blob_storage", + "parameters": { + "account": "act", + "container": "container", + "sas_token": "sastoken", + "storage_endpoint_suffix": "suffix", + "path_prefix": "prefix" + } + } + + def test_google_cloud_storage_success(): res = subscription_request.google_cloud_storage(credentials='cred', bucket='bucket') @@ -227,6 +266,19 @@ def test_google_cloud_storage_success(): } +def test_google_cloud_storage_path_prefix_success(): + res = subscription_request.google_cloud_storage(credentials='cred', + bucket='bucket', + path_prefix="prefix") + + assert res == { + "type": "google_cloud_storage", + "parameters": { + "bucket": "bucket", "credentials": "cred", "path_prefix": "prefix" + } + } + + def test_oracle_cloud_storage_success(): res = subscription_request.oracle_cloud_storage( customer_access_key_id='keyid', @@ -247,6 +299,28 @@ def test_oracle_cloud_storage_success(): } +def test_oracle_cloud_storage_path_prefix_success(): + res = subscription_request.oracle_cloud_storage( + customer_access_key_id='keyid', + customer_secret_key='secretkey', + bucket='bucket', + region='region', + namespace='namespace', + path_prefix="prefix") + + assert res == { + "type": "oracle_cloud_storage", + "parameters": { + "customer_access_key_id": "keyid", + "customer_secret_key": "secretkey", + "bucket": "bucket", + "region": "region", + "namespace": "namespace", + "path_prefix": "prefix" + } + } + + def test_notifications_success(): topics = ['delivery.success'] notifications_config = subscription_request.notifications(url='url',
Add path_prefix parameter to cloud delivery option

I would like a parameter in the [delivery options](https://github.com/planetlabs/planet-client-python/blob/4873a4856ef3247ba6e5e5fd7949e90bddad7e11/planet/subscription_request.py#L371) that allows me to add a path prefix. I know the API allows it: [https://developers.planet.com/docs/subscriptions/delivery/...](https://developers.planet.com/docs/subscriptions/delivery/#:~:text=%22delivery%22%3A%20%7B%0A%20%20%20%20%22type%22%3A%20%22google_cloud_storage%22%2C%0A%20%20%20%20%22parameters%22%3A%20%7B%0A%20%20%20%20%20%20%20%20%22bucket%22%3A%20%22your%2Dgcs%2Dbucket%22%2C%0A%20%20%20%20%20%20%20%20%22credentials%22%3A%20%22c29tZWNyZWRzZm9yeW91cmdjc2J1Y2...%22%2C%0A%20%20%20%20%20%20%20%20%22path_prefix%22%3A%22optionalsubfolder1/optionalsubfolder2%22%0A%20%20%20%20%7D%0A%7D)
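With the patch above, a usage sketch (the credentials string is a placeholder, not a working key):

```python
from planet import subscription_request

delivery = subscription_request.google_cloud_storage(
    credentials='c29tZWNyZWRzZm9yeW91cmdjc2J1Y2...',  # placeholder value
    bucket='your-gcs-bucket',
    path_prefix='optionalsubfolder1/optionalsubfolder2',
)
# delivery == {
#     "type": "google_cloud_storage",
#     "parameters": {
#         "bucket": "your-gcs-bucket",
#         "credentials": "c29tZWNyZWRzZm9yeW91cmdjc2J1Y2...",
#         "path_prefix": "optionalsubfolder1/optionalsubfolder2",
#     },
# }
```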
0.0
4873a4856ef3247ba6e5e5fd7949e90bddad7e11
[ "tests/unit/test_subscription_request.py::test_amazon_s3_path_prefix_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_path_prefix_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_path_prefix_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_path_prefix_success" ]
[ "tests/unit/test_subscription_request.py::test_build_request_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_failure", "tests/unit/test_subscription_request.py::test_catalog_source_success", "tests/unit/test_subscription_request.py::test_catalog_source_featurecollection", "tests/unit/test_subscription_request.py::test_catalog_source_invalid_start_time", "tests/unit/test_subscription_request.py::test_amazon_s3_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_success", "tests/unit/test_subscription_request.py::test_notifications_success", "tests/unit/test_subscription_request.py::test_notifications_invalid_topics", "tests/unit/test_subscription_request.py::test_band_math_tool_success", "tests/unit/test_subscription_request.py::test_band_math_tool_invalid_pixel_type", "tests/unit/test_subscription_request.py::test_clip_tool_success", "tests/unit/test_subscription_request.py::test_clip_tool_invalid_type", "tests/unit/test_subscription_request.py::test_file_format_tool_success", "tests/unit/test_subscription_request.py::test_file_format_tool_invalid_format", "tests/unit/test_subscription_request.py::test_harmonize_tool_success", "tests/unit/test_subscription_request.py::test_harmonize_tool_invalid_target_sensor", "tests/unit/test_subscription_request.py::test_reproject_tool_success", "tests/unit/test_subscription_request.py::test_reproject_tool_invalid_kernel", "tests/unit/test_subscription_request.py::test_toar_tool_success", "tests/unit/test_subscription_request.py::test_pv_source_success[biomass_proxy-BIOMASS-PROXY_V3.0_10]", "tests/unit/test_subscription_request.py::test_pv_source_success[var1-VAR1-ABCD]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages0]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages1]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages2]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages3]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages4]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages5]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages6]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages7]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages8]", "tests/unit/test_subscription_request.py::test_catalog_source_time_range_type_acquired", "tests/unit/test_subscription_request.py::test_cloud_filter_tool_success" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-12-09 21:14:59+00:00
apache-2.0
4,582
planetlabs__planet-client-python-378
diff --git a/planet/auth.py b/planet/auth.py index 4730128..8540053 100644 --- a/planet/auth.py +++ b/planet/auth.py @@ -27,7 +27,7 @@ from .exceptions import AuthException LOGGER = logging.getLogger(__name__) -BASE_URL = constants.PLANET_BASE_URL +BASE_URL = constants.PLANET_BASE_URL + 'v0/auth/' ENV_API_KEY = 'PL_API_KEY' SECRET_FILE_PATH = os.path.join(os.path.expanduser('~'), '.planet.json') @@ -161,8 +161,6 @@ class AuthClient(): if not self._base_url.endswith('/'): self._base_url += '/' - self._auth_url = self._base_url + 'v0/auth/' - def login( self, email: str, @@ -181,7 +179,7 @@ class AuthClient(): A JSON object containing an `api_key` property with the user's API_KEY. ''' - url = self._auth_url + 'login' + url = self._base_url + 'login' data = {'email': email, 'password': password }
planetlabs/planet-client-python
217310866988d7381e98755bfe2d10efee844d75
diff --git a/tests/integration/test_auth_api.py b/tests/integration/test_auth_api.py index a23510d..0a6e532 100644 --- a/tests/integration/test_auth_api.py +++ b/tests/integration/test_auth_api.py @@ -1,4 +1,4 @@ -# Copyright 2021 Planet Labs, Inc. +# Copyright 2021 Planet Labs, PBC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of @@ -24,14 +24,13 @@ from planet.auth import AuthClient TEST_URL = 'http://MockNotRealURL/' -AUTH_URL = TEST_URL + 'v0/auth/' LOGGER = logging.getLogger(__name__) @respx.mock def test_AuthClient_success(): - login_url = AUTH_URL + 'login' + login_url = TEST_URL + 'login' payload = {'api_key': 'iamakey'} resp = {'token': jwt.encode(payload, 'key')} @@ -46,7 +45,7 @@ def test_AuthClient_success(): @respx.mock def test_AuthClient_invalid_email(): - login_url = AUTH_URL + 'login' + login_url = TEST_URL + 'login' resp = { "errors": { @@ -69,7 +68,7 @@ def test_AuthClient_invalid_email(): @respx.mock def test_AuthClient_invalid_password(): - login_url = AUTH_URL + 'login' + login_url = TEST_URL + 'login' resp = { "errors": None, diff --git a/tests/integration/test_auth_cli.py b/tests/integration/test_auth_cli.py new file mode 100644 index 0000000..02a57f0 --- /dev/null +++ b/tests/integration/test_auth_cli.py @@ -0,0 +1,55 @@ +# Copyright 2022 Planet Labs, PBC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +from http import HTTPStatus +from unittest.mock import MagicMock + +from click.testing import CliRunner +import httpx +import jwt +import pytest +import respx + +import planet +from planet.cli import cli + +TEST_URL = 'http://MockNotRealURL/' + + [email protected](autouse=True) +def patch_session(monkeypatch): + '''Make sure we don't actually make any http calls''' + monkeypatch.setattr(planet, 'Session', MagicMock(spec=planet.Session)) + + [email protected] [email protected] +def test_cli_auth_init_base_url(): + '''Test base url option + + Uses the auth init path to ensure the base url is changed to the mocked + url. So, ends up testing the auth init path somewhat as well + ''' + login_url = TEST_URL + 'login' + + payload = {'api_key': 'iamakey'} + resp = {'token': jwt.encode(payload, 'key')} + mock_resp = httpx.Response(HTTPStatus.OK, json=resp) + respx.post(login_url).return_value = mock_resp + + result = CliRunner().invoke( + cli.main, + args=['auth', '--base-url', TEST_URL, 'init'], + input='email\npw\n') + + assert not result.exception
change auth base url to the api url

The orders client base url points at the orders API endpoint, so changing the base url replaces the entire url path. Right now, the auth client base url is just the root url, and the relative path to the auth API endpoint is appended inside the client, so changing the base url only changes the root. Bring the auth client (and, therefore, the auth CLI) into alignment with the orders client: change the auth base url to the auth API endpoint.
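A sketch of the URL composition before and after, with the paths taken from the patch above:

```python
# Before: base_url was the root URL and the client appended the API
# path, so overriding base_url could only swap out the root.
#   BASE_URL  = constants.PLANET_BASE_URL
#   login_url = base_url + 'v0/auth/' + 'login'

# After: base_url is the auth API endpoint itself, matching the orders
# client, so overriding it replaces the entire URL path.
#   BASE_URL  = constants.PLANET_BASE_URL + 'v0/auth/'
#   login_url = base_url + 'login'
```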
0.0
217310866988d7381e98755bfe2d10efee844d75
[ "tests/integration/test_auth_api.py::test_AuthClient_success", "tests/integration/test_auth_api.py::test_AuthClient_invalid_email", "tests/integration/test_auth_api.py::test_AuthClient_invalid_password", "tests/integration/test_auth_cli.py::test_cli_auth_init_base_url" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-01-22 22:55:44+00:00
apache-2.0
4,583
planetlabs__planet-client-python-389
diff --git a/planet/cli/orders.py b/planet/cli/orders.py index ab03c78..a30b561 100644 --- a/planet/cli/orders.py +++ b/planet/cli/orders.py @@ -118,12 +118,11 @@ async def download(ctx, order_id, quiet, overwrite, dest): with planet.reporting.StateBar(order_id=order_id, disable=quiet) as bar: await cl.poll(str(order_id), report=bar.update) - filenames = await cl.download_order( + _ = await cl.download_order( str(order_id), directory=dest, overwrite=overwrite, progress_bar=not quiet) - click.echo(f'Downloaded {len(filenames)} files.') def split_id_list(ctx, param, value):
planetlabs/planet-client-python
cd36c14e0e38ae07d7ac04527129c24004b3fa2f
diff --git a/tests/unit/test_cli_orders.py b/tests/unit/test_cli_orders.py index ff9c16a..ee1f4a0 100644 --- a/tests/unit/test_cli_orders.py +++ b/tests/unit/test_cli_orders.py @@ -89,8 +89,8 @@ def test_cli_orders_download(runner, patch_ordersclient, oid): return patch_ordersclient('poll', poll) - # Number of files in all_test_files - expected = 'Downloaded 4 files.\n' + # Download should not report anything + expected = '' # allow for some progress reporting result = runner.invoke(
Remove reported data from CLI download

Currently, when requesting a download, the CLI reports the number of files in an order. For example, for an order with 12 files, here's what one would see on stdout:

```
❯ planet orders download my-oid
Downloaded 12 files.
```

**Proposal:** After [lengthy discussions](https://github.com/planetlabs/planet-client-python/pull/355#issuecomment-1010215231), I propose that the CLI should remain silent upon a download request. Specifically, the download request should not write to stdout or stderr; instead it should use the INFO log, which already exists in `body.write()`, to communicate the files downloaded.

**To do:**

1) Remove `click.echo(f'Downloaded {len(filenames)} files.')` in `planet.cli.orders.download()` (line 126)
2) Update the CLI's test in `tests.unit.test_cli_orders.test_cli_orders_download()` to no longer expect "`'Downloaded 4 files.\n'`"
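Assuming only the standard library logging module, a user who still wants per-file reporting would enable it like this:

```python
import logging

# The download stays silent on stdout/stderr; the INFO-level record
# already emitted by body.write() then reports each downloaded file.
logging.basicConfig(level=logging.INFO)
```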
0.0
cd36c14e0e38ae07d7ac04527129c24004b3fa2f
[ "tests/unit/test_cli_orders.py::test_cli_orders_download" ]
[ "tests/unit/test_cli_orders.py::test_cli_orders_list_empty", "tests/unit/test_cli_orders.py::test_cli_orders_list_success", "tests/unit/test_cli_orders.py::test_cli_orders_get", "tests/unit/test_cli_orders.py::test_cli_orders_cancel", "tests/unit/test_cli_orders.py::test_cli_read_file_geojson", "tests/unit/test_cli_orders.py::test_cli_orders_create_cloudconfig", "tests/unit/test_cli_orders.py::test_cli_orders_create_clip", "tests/unit/test_cli_orders.py::test_cli_orders_create_tools", "tests/unit/test_cli_orders.py::test_cli_orders_create_validate_id", "tests/unit/test_cli_orders.py::test_cli_orders_create_validate_item_type", "tests/unit/test_cli_orders.py::test_cli_orders_create_validate_cloudconfig", "tests/unit/test_cli_orders.py::test_cli_orders_create_validate_tools", "tests/unit/test_cli_orders.py::test_cli_orders_create_validate_clip" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-02-02 21:02:47+00:00
apache-2.0
4,584
planetlabs__planet-client-python-451
diff --git a/docs/guide.md b/docs/guide.md index 757874d..d594900 100644 --- a/docs/guide.md +++ b/docs/guide.md @@ -183,7 +183,7 @@ the context of a `Session` with the `OrdersClient`: >>> async def main(): ... async with Session() as sess: ... cl = OrdersClient(sess) -... order_id = await cl.create_order(request) +... order = await cl.create_order(request) ... >>> asyncio.run(main()) @@ -210,13 +210,13 @@ from planet import reporting ... with reporting.StateBar(state='creating') as bar: ... # create order ... order = await cl.create_order(request) -... bar.update(state='created', order_id=order.id) +... bar.update(state='created', order_id=order['id']) ... ... # poll -... await cl.poll(order.id, report=bar.update) +... await cl.poll(order['id'], report=bar.update) ... ... # download -... await cl.download_order(order.id) +... await cl.download_order(order['id']) ... >>> asyncio.run(create_poll_and_download()) ``` diff --git a/planet/__init__.py b/planet/__init__.py index 504c209..8eb5800 100644 --- a/planet/__init__.py +++ b/planet/__init__.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. from .http import Session -from .models import Order from . import order_request, reporting from .__version__ import __version__ # NOQA from .auth import Auth @@ -21,7 +20,6 @@ from .clients import OrdersClient __all__ = [ Session, OrdersClient, - Order, order_request, reporting, Auth, diff --git a/planet/cli/cli.py b/planet/cli/cli.py index d463144..726debb 100644 --- a/planet/cli/cli.py +++ b/planet/cli/cli.py @@ -30,8 +30,13 @@ LOGGER = logging.getLogger(__name__) default="warning", help=("Optional: set verbosity level to warning, info, or debug.\ Defaults to warning.")) [email protected]('--quiet', + is_flag=True, + default=False, + help='Disable ANSI control output.') @click.version_option(version=planet.__version__) -def main(ctx, verbosity): [email protected]_option(version=planet.__version__) +def main(ctx, verbosity, quiet): """Planet API Client Parameters: ctx -- context object @@ -41,6 +46,7 @@ def main(ctx, verbosity): # ensure that ctx.obj exists and is a dict (in case `cli()` is called # by means other than the `if` block below) ctx.ensure_object(dict) + ctx.obj['QUIET'] = quiet def _configure_logging(verbosity): diff --git a/planet/cli/orders.py b/planet/cli/orders.py index 01f677e..e1da200 100644 --- a/planet/cli/orders.py +++ b/planet/cli/orders.py @@ -66,9 +66,10 @@ def orders(ctx, base_url): async def list(ctx, state, limit, pretty): '''List orders''' async with orders_client(ctx) as cl: - orders = await cl.list_orders(state=state, limit=limit, as_json=True) + orders = await cl.list_orders(state=state, limit=limit) + orders_list = [o async for o in orders] - echo_json(orders, pretty) + echo_json(orders_list, pretty) @orders.command() @@ -85,7 +86,7 @@ async def get(ctx, order_id, pretty): async with orders_client(ctx) as cl: order = await cl.get_order(str(order_id)) - echo_json(order.json, pretty) + echo_json(order, pretty) @orders.command() @@ -135,15 +136,11 @@ def split_list_arg(ctx, param, value): type=int, default=5, help='Maximum number of polls. 
Set to zero for no limit.') [email protected]('--quiet', - is_flag=True, - default=False, - help='Disable ANSI control output.') @click.option('--state', help='State prior to a final state that will end polling.', type=click.Choice(planet.clients.orders.ORDER_STATE_SEQUENCE, case_sensitive=False)) -async def wait(ctx, order_id, delay, max_attempts, quiet, state): +async def wait(ctx, order_id, delay, max_attempts, state): """Wait until order reaches desired state. Reports the state of the order on the last poll. @@ -165,6 +162,7 @@ async def wait(ctx, order_id, delay, max_attempts, quiet, state): If --state is specified, polling will complete when the specified earlier state is reached or passed. """ + quiet = ctx.obj['QUIET'] async with orders_client(ctx) as cl: with planet.reporting.StateBar(order_id=order_id, disable=quiet) as bar: @@ -181,11 +179,6 @@ async def wait(ctx, order_id, delay, max_attempts, quiet, state): @translate_exceptions @coro @click.argument('order_id', type=click.UUID) [email protected]('-q', - '--quiet', - is_flag=True, - default=False, - help='Disable ANSI control output.') @click.option('-o', '--overwrite', is_flag=True, @@ -198,8 +191,9 @@ async def wait(ctx, order_id, delay, max_attempts, quiet, state): resolve_path=True, writable=True, file_okay=False)) -async def download(ctx, order_id, quiet, overwrite, dest): +async def download(ctx, order_id, overwrite, dest): """Download order by order ID.""" + quiet = ctx.obj['QUIET'] async with orders_client(ctx) as cl: await cl.download_order(str(order_id), directory=dest, @@ -319,4 +313,4 @@ async def create(ctx, async with orders_client(ctx) as cl: order = await cl.create_order(request) - echo_json(order.json, pretty) + echo_json(order, pretty) diff --git a/planet/clients/orders.py b/planet/clients/orders.py index dfa7ffb..82da519 100644 --- a/planet/clients/orders.py +++ b/planet/clients/orders.py @@ -22,7 +22,7 @@ import uuid from .. import exceptions from ..constants import PLANET_BASE_URL from ..http import Session -from ..models import Order, Orders, Request, Response, StreamingBody +from ..models import Paged, Request, Response, StreamingBody BASE_URL = f'{PLANET_BASE_URL}/compute/ops' STATS_PATH = '/stats/orders/v2' @@ -37,6 +37,14 @@ ORDER_STATE_SEQUENCE = \ LOGGER = logging.getLogger(__name__) +class Orders(Paged): + '''Asynchronous iterator over Orders from a paged response describing + orders.''' + LINKS_KEY = '_links' + NEXT_KEY = 'next' + ITEMS_KEY = 'orders' + + class OrderStates(): SEQUENCE = ORDER_STATE_SEQUENCE @@ -114,7 +122,7 @@ class OrdersClient(): ''' return await self._session.request(request) - async def create_order(self, request: dict) -> str: + async def create_order(self, request: dict) -> dict: '''Create an order request. Example: @@ -132,17 +140,16 @@ class OrdersClient(): ... ) ... async with Session() as sess: ... cl = OrdersClient(sess) - ... order_id = await cl.create_order(request) + ... order = await cl.create_order(request) ... >>> asyncio.run(main()) - ``` Parameters: request: order request definition Returns: - The ID of the order + JSON description of the created order Raises: planet.exceptions.APIError: On API error. @@ -151,18 +158,16 @@ class OrdersClient(): req = self._request(url, method='POST', json=request) resp = await self._do_request(req) + return resp.json() - order = Order(resp.json()) - return order - - async def get_order(self, order_id: str) -> Order: + async def get_order(self, order_id: str) -> dict: '''Get order details by Order ID. 
Parameters: order_id: The ID of the order Returns: - Order information + JSON description of the order Raises: planet.exceptions.ClientError: If order_id is not a valid UUID. @@ -173,9 +178,7 @@ class OrdersClient(): req = self._request(url, method='GET') resp = await self._do_request(req) - - order = Order(resp.json()) - return order + return resp.json() async def cancel_order(self, order_id: str) -> dict: '''Cancel a queued order. @@ -290,14 +293,16 @@ class OrdersClient(): state. """ order = await self.get_order(order_id) - if not OrderStates.is_final(order.state): + + order_state = order['state'] + if not OrderStates.is_final(order_state): raise exceptions.ClientError( 'Order cannot be downloaded because the order state ' - f'({order.state}) is not a final state. ' + f'({order_state}) is not a final state. ' 'Consider using wait functionality before ' 'attempting to download.') - locations = order.locations + locations = self._get_order_locations(order) LOGGER.info( f'downloading {len(locations)} assets from order {order_id}') @@ -310,6 +315,12 @@ class OrdersClient(): ] return filenames + @staticmethod + def _get_order_locations(order): + links = order['_links'] + results = links.get('results', None) + return list(r['location'] for r in results if r) + async def wait(self, order_id: str, state: str = None, @@ -372,12 +383,12 @@ class OrdersClient(): t = time.time() order = await self.get_order(order_id) - current_state = order.state + current_state = order['state'] - LOGGER.debug(state) + LOGGER.debug(current_state) if callback: - callback(order.state) + callback(current_state) if OrderStates.is_final(current_state) or \ (state and OrderStates.reached(state, current_state)): @@ -395,17 +406,12 @@ class OrdersClient(): return current_state - async def list_orders( - self, - state: str = None, - limit: int = None, - as_json: bool = False) -> typing.Union[typing.List[Order], dict]: + async def list_orders(self, state: str = None, limit: int = None): """Get all order requests. Parameters: state: Filter orders to given state. limit: Limit orders to given limit. - as_json: Return orders as a json dict. Returns: User orders that match the query @@ -425,15 +431,5 @@ class OrdersClient(): else: params = None - orders = await self._get_orders(url, params=params, limit=limit) - - if as_json: - ret = [o.json async for o in orders] - else: - ret = [o async for o in orders] - return ret - - async def _get_orders(self, url, params=None, limit=None): request = self._request(url, 'GET', params=params) - return Orders(request, self._do_request, limit=limit) diff --git a/planet/models.py b/planet/models.py index af01e7b..482126c 100644 --- a/planet/models.py +++ b/planet/models.py @@ -14,7 +14,6 @@ """Manage data for requests and responses.""" import copy from datetime import datetime -import json import logging import mimetypes import random @@ -347,73 +346,3 @@ class Paged(): LOGGER.debug('end of the pages') next_link = False return next_link - - -class Order(): - '''Managing description of an order returned from Orders API. - - :param data: Response json describing order - :type data: dict - ''' - LINKS_KEY = '_links' - RESULTS_KEY = 'results' - LOCATION_KEY = 'location' - - def __init__(self, data): - self.data = data - - def __str__(self): - return "<Order> " + json.dumps(self.data) - - @property - def results(self): - '''Results for each item in order. 
- - :return: result for each item in order - :rtype: list of dict - ''' - links = self.data[self.LINKS_KEY] - results = links.get(self.RESULTS_KEY, None) - return results - - @property - def locations(self): - '''Download locations for order results. - - :return: download locations in order - :rtype: list of str - ''' - return list(r[self.LOCATION_KEY] for r in self.results) - - @property - def state(self): - '''State of the order. - - :return: state of order - :rtype: str - ''' - return self.data['state'] - - @property - def id(self): - '''ID of the order. - - :return: id of order - :rtype: str - ''' - return self.data['id'] - - @property - def json(self): - return self.data - - -class Orders(Paged): - '''Asynchronous iterator over Orders from a paged response describing - orders.''' - LINKS_KEY = '_links' - NEXT_KEY = 'next' - ITEMS_KEY = 'orders' - - async def __anext__(self): - return Order(await super().__anext__())
planetlabs/planet-client-python
9fd238ba96bcd828e39b8cac3dda54efdc2f7e83
diff --git a/tests/integration/test_orders_api.py b/tests/integration/test_orders_api.py index 53f991d..c542ece 100644 --- a/tests/integration/test_orders_api.py +++ b/tests/integration/test_orders_api.py @@ -113,9 +113,7 @@ async def test_list_orders_basic(order_descriptions, session): cl = OrdersClient(session, base_url=TEST_URL) orders = await cl.list_orders() - - oids = list(o.id for o in orders) - assert oids == ['oid1', 'oid2', 'oid3'] + assert order_descriptions == [o async for o in orders] @respx.mock @@ -138,9 +136,7 @@ async def test_list_orders_state(order_descriptions, session): # if the value of state doesn't get sent as a url parameter, # the mock will fail and this test will fail orders = await cl.list_orders(state='failed') - - oids = list(o.id for o in orders) - assert oids == ['oid1', 'oid2'] + assert [order1, order2] == [o async for o in orders] @pytest.mark.asyncio @@ -172,23 +168,7 @@ async def test_list_orders_limit(order_descriptions, session): # since nono_page_url is not mocked, an error will occur if the client # attempts to access the next page when the limit is already reached orders = await cl.list_orders(limit=1) - - oids = [o.id for o in orders] - assert oids == ['oid1'] - - [email protected] [email protected] -async def test_list_orders_asjson(order_descriptions, session): - order1, order2, order3 = order_descriptions - - page1_response = {"_links": {"_self": "string"}, "orders": [order1]} - mock_resp1 = httpx.Response(HTTPStatus.OK, json=page1_response) - respx.get(TEST_ORDERS_URL).return_value = mock_resp1 - - cl = OrdersClient(session, base_url=TEST_URL) - orders = await cl.list_orders(as_json=True) - assert orders[0]['id'] == 'oid1' + assert [order1] == [o async for o in orders] @respx.mock @@ -200,7 +180,7 @@ async def test_create_order(oid, order_description, order_request, session): cl = OrdersClient(session, base_url=TEST_URL) order = await cl.create_order(order_request) - assert order.json == order_description + assert order == order_description @respx.mock @@ -259,8 +239,7 @@ async def test_get_order(oid, order_description, session): cl = OrdersClient(session, base_url=TEST_URL) order = await cl.get_order(oid) - - assert order.state == 'queued' + assert order_description == order @pytest.mark.asyncio diff --git a/tests/integration/test_orders_cli.py b/tests/integration/test_orders_cli.py index 9d22d7a..4faa4dd 100644 --- a/tests/integration/test_orders_cli.py +++ b/tests/integration/test_orders_cli.py @@ -231,21 +231,6 @@ def test_cli_orders_wait_max_attempts(invoke, order_description, oid): 'Error: Maximum number of attempts (1) reached.\n') [email protected] -def test_cli_orders_wait_quiet(invoke, order_description, oid): - get_url = f'{TEST_ORDERS_URL}/{oid}' - - order_description['state'] = 'success' - - route = respx.get(get_url) - route.side_effect = [httpx.Response(HTTPStatus.OK, json=order_description)] - - runner = CliRunner() - result = invoke(['wait', '--delay', '0', '--quiet', oid], runner=runner) - assert not result.exception - assert result.output == 'success\n' - - @pytest.fixture def mock_download_response(oid, order_description): @@ -347,16 +332,6 @@ def test_cli_orders_download_overwrite(invoke, assert json.load(open(filepath)) == {'key': 'value'} [email protected] -def test_cli_orders_download_quiet(invoke, mock_download_response, oid): - mock_download_response() - - runner = CliRunner() - with runner.isolated_filesystem(): - result = invoke(['download', '--quiet', oid], runner=runner) - assert not result.exception - - 
@respx.mock def test_cli_orders_download_state(invoke, order_description, oid): get_url = f'{TEST_ORDERS_URL}/{oid}' diff --git a/tests/unit/test_cli_main.py b/tests/unit/test_cli_main.py index 9fb3715..8a90ab4 100644 --- a/tests/unit/test_cli_main.py +++ b/tests/unit/test_cli_main.py @@ -28,6 +28,26 @@ def debug_input(): return ['debug', ' debug ', 'debu', 45] +def test_cli_orders_quiet(): + + runner = CliRunner() + + # Valid and invalid inputs for the "quiet" flag + valid_quiet_inputs = ['--quiet', ' --quiet', ' --quiet '] + invalid_quiet_inputs = ['--quiet ', '-q', '--not_a_valid_input', 123] + + # Test the valid quiet inputs + for quiet_input in valid_quiet_inputs: + valid_result = runner.invoke(cli.main, args=[quiet_input, 'orders']) + assert not valid_result.exception + assert valid_result.exit_code == 0 + # Test the invalid quiet inputs + for quiet_input in invalid_quiet_inputs: + invalid_result = runner.invoke(cli.main, args=[quiet_input, 'orders']) + assert invalid_result.exception + assert invalid_result.exit_code != 0 + + def test_cli_info_verbosity(monkeypatch): log_level = None diff --git a/tests/unit/test_models.py b/tests/unit/test_models.py index fea2549..38b6f70 100644 --- a/tests/unit/test_models.py +++ b/tests/unit/test_models.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import copy import logging import math from unittest.mock import MagicMock @@ -179,59 +178,7 @@ def get_pages(): return do_get [email protected] -async def test_Paged_iterator(get_pages): - req = MagicMock() - paged = models.Paged(req, get_pages) - assert [1, 2, 3, 4] == [i async for i in paged] - - [email protected] -async def test_Paged_limit(get_pages): - req = MagicMock() - paged = models.Paged(req, get_pages, limit=3) - assert [1, 2, 3] == [i async for i in paged] - - [email protected] -def get_orders_pages(orders_page): - page2 = copy.deepcopy(orders_page) - del page2['_links']['next'] - responses = [ - mock_http_response(json=orders_page), mock_http_response(json=page2) - ] - - async def do_get(req): - return responses.pop(0) - - return do_get - - [email protected] -async def test_Orders(get_orders_pages): - req = MagicMock() - orders = models.Orders(req, get_orders_pages) - expected_ids = [ - 'f05b1ed7-11f0-43da-960c-a624f7c355c8', - '8d4799c4-5291-40c0-a7f5-adb9a974455d', - 'f05b1ed7-11f0-43da-960c-a624f7c355c8', - '8d4799c4-5291-40c0-a7f5-adb9a974455d' - ] - assert expected_ids == [o.id async for o in orders] - - -def test_Order_results(order_description): - order = models.Order(order_description) - assert len(order.results) == 3 - - -def test_Order_locations(order_description): - order = models.Order(order_description) - expected_locations = ['location1', 'location2', 'location3'] - assert order.locations == expected_locations - - -def test_last_modified_emptyheader(): +def test_StreamingBody_last_modified_emptyheader(): '''This function tests the last_modified function for an empty header, by seeing if the last_modified is None. ''' @@ -252,7 +199,7 @@ def test_last_modified_emptyheader(): assert output == expected -def test_last_modified_completeheader(): +def test_StreamingBody_last_modified_completeheader(): '''This function tests the last_modified function for an existing header, by comparing the last_modified date to an expected output. 
@@ -274,3 +221,17 @@ def test_last_modified_completeheader(): expected = datetime.strptime(hr.headers['last-modified'], '%a, %d %b %Y %H:%M:%S GMT') assert output == expected + + [email protected] +async def test_Paged_iterator(get_pages): + req = MagicMock() + paged = models.Paged(req, get_pages) + assert [1, 2, 3, 4] == [i async for i in paged] + + [email protected] +async def test_Paged_limit(get_pages): + req = MagicMock() + paged = models.Paged(req, get_pages, limit=3) + assert [1, 2, 3] == [i async for i in paged]
change orders python api response from Order object to json description of the order

Change the orders Python API response from an `Order` object to the JSON description of the order. This affects:

- list orders (remove `as_json` option)
- get order
- cancel order
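A before/after sketch of the resulting access pattern, following the `docs/guide.md` hunk in this patch:

```python
import asyncio
from planet import Session, OrdersClient

async def main(request):
    async with Session() as sess:
        cl = OrdersClient(sess)
        order = await cl.create_order(request)  # now a plain dict
        # before this change: order.id and order.state (Order object)
        print(order['id'], order['state'])      # after: JSON description

# asyncio.run(main(request)) with a request built via planet.order_request
```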
0.0
9fd238ba96bcd828e39b8cac3dda54efdc2f7e83
[ "tests/integration/test_orders_api.py::test_list_orders_basic", "tests/integration/test_orders_api.py::test_list_orders_state", "tests/integration/test_orders_api.py::test_list_orders_limit", "tests/integration/test_orders_api.py::test_create_order", "tests/integration/test_orders_api.py::test_get_order", "tests/unit/test_cli_main.py::test_cli_orders_quiet" ]
[ "tests/integration/test_orders_api.py::test_OrderStates_reached", "tests/integration/test_orders_api.py::test_OrderStates_passed", "tests/integration/test_orders_api.py::test_list_orders_state_invalid_state", "tests/integration/test_orders_api.py::test_create_order_bad_item_type", "tests/integration/test_orders_api.py::test_create_order_item_id_does_not_exist", "tests/integration/test_orders_api.py::test_get_order_invalid_id", "tests/integration/test_orders_api.py::test_get_order_id_doesnt_exist", "tests/integration/test_orders_api.py::test_cancel_order", "tests/integration/test_orders_api.py::test_cancel_order_invalid_id", "tests/integration/test_orders_api.py::test_cancel_order_id_doesnt_exist", "tests/integration/test_orders_api.py::test_cancel_order_id_cannot_be_cancelled", "tests/integration/test_orders_api.py::test_cancel_orders_by_ids", "tests/integration/test_orders_api.py::test_cancel_orders_by_ids_invalid_id", "tests/integration/test_orders_api.py::test_cancel_orders_all", "tests/integration/test_orders_api.py::test_wait_default", "tests/integration/test_orders_api.py::test_wait_callback", "tests/integration/test_orders_api.py::test_wait_state", "tests/integration/test_orders_api.py::test_wait_max_attempts_enabled", "tests/integration/test_orders_api.py::test_wait_max_attempts_disabled", "tests/integration/test_orders_api.py::test_wait_invalid_oid", "tests/integration/test_orders_api.py::test_wait_invalid_state", "tests/integration/test_orders_api.py::test_aggegated_order_stats", "tests/integration/test_orders_api.py::test_download_asset_md", "tests/integration/test_orders_api.py::test_download_asset_img", "tests/integration/test_orders_api.py::test_download_order_success", "tests/integration/test_orders_api.py::test_download_order_state", "tests/integration/test_orders_api.py::test_download_order_overwrite_true_preexisting_data", "tests/integration/test_orders_api.py::test_download_order_overwrite_false_preexisting_data", "tests/integration/test_orders_api.py::test_download_order_overwrite_true_nonexisting_data", "tests/integration/test_orders_api.py::test_download_order_overwrite_false_nonexisting_data", "tests/integration/test_orders_cli.py::test_split_list_arg_empty_string", "tests/integration/test_orders_cli.py::test_split_list_arg_None", "tests/integration/test_orders_cli.py::test_cli_orders_list_basic", "tests/integration/test_orders_cli.py::test_cli_orders_list_empty", "tests/integration/test_orders_cli.py::test_cli_orders_list_state", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit", "tests/integration/test_orders_cli.py::test_cli_orders_list_pretty", "tests/integration/test_orders_cli.py::test_cli_orders_get", "tests/integration/test_orders_cli.py::test_cli_orders_get_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_cancel", "tests/integration/test_orders_cli.py::test_cli_orders_cancel_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_wait_default", "tests/integration/test_orders_cli.py::test_cli_orders_wait_max_attempts", "tests/integration/test_orders_cli.py::test_cli_orders_download_default", "tests/integration/test_orders_cli.py::test_cli_orders_download_dest", "tests/integration/test_orders_cli.py::test_cli_orders_download_overwrite", "tests/integration/test_orders_cli.py::test_cli_orders_download_state", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", 
"tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_item_type_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_create_id_empty", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip_featureclass", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip_invalid_geometry", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip_and_tools", "tests/integration/test_orders_cli.py::test_cli_orders_create_cloudconfig", "tests/integration/test_orders_cli.py::test_cli_orders_create_email", "tests/integration/test_orders_cli.py::test_cli_orders_create_tools", "tests/integration/test_orders_cli.py::test_cli_orders_read_file_json_doesnotexist", "tests/integration/test_orders_cli.py::test_cli_orders_read_file_json_invalidjson", "tests/unit/test_cli_main.py::test_cli_info_verbosity", "tests/unit/test_models.py::test_StreamingBody_name", "tests/unit/test_models.py::test__get_filename_from_headers[headers0-open_california.tif]", "tests/unit/test_models.py::test__get_filename_from_headers[headers1-None]", "tests/unit/test_models.py::test__get_filename_from_headers[headers2-None]", "tests/unit/test_models.py::test__get_filename_from_url[url0-None]", "tests/unit/test_models.py::test__get_filename_from_url[url1-None]", "tests/unit/test_models.py::test__get_filename_from_url[url2-example.tif]", "tests/unit/test_models.py::test__get_filename_from_url[url3-example.tif]", "tests/unit/test_models.py::test__get_filename_from_url[url4-example.tif]", "tests/unit/test_models.py::test__get_random_filename[None-<lambda>]", "tests/unit/test_models.py::test__get_random_filename[image/tiff-<lambda>]", "tests/unit/test_models.py::test_StreamingBody_write_img", "tests/unit/test_models.py::test_StreamingBody_last_modified_emptyheader", "tests/unit/test_models.py::test_StreamingBody_last_modified_completeheader", "tests/unit/test_models.py::test_Paged_iterator", "tests/unit/test_models.py::test_Paged_limit" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-04-27 00:47:00+00:00
apache-2.0
4,585
planetlabs__planet-client-python-581
diff --git a/planet/cli/orders.py b/planet/cli/orders.py index a290c63..90b7f47 100644 --- a/planet/cli/orders.py +++ b/planet/cli/orders.py @@ -71,8 +71,8 @@ async def list(ctx, state, limit, pretty): ''' async with orders_client(ctx) as cl: orders = await cl.list_orders(state=state, limit=limit) - orders_list = [o async for o in orders] - echo_json(orders_list, pretty) + async for o in orders: + echo_json(o, pretty) @orders.command()
planetlabs/planet-client-python
a65a117ebba604a9c0e0c5dd2a95b5e519255ad3
diff --git a/tests/integration/test_orders_cli.py b/tests/integration/test_orders_cli.py index 38275c6..3afce4a 100644 --- a/tests/integration/test_orders_cli.py +++ b/tests/integration/test_orders_cli.py @@ -72,8 +72,9 @@ def test_cli_orders_list_basic(invoke, order_descriptions): respx.get(next_page_url).return_value = mock_resp2 result = invoke(['list']) - assert not result.exception - assert json.dumps([order1, order2, order3]) + '\n' == result.output + assert result.exit_code == 0 + sequence = '\n'.join([json.dumps(o) for o in [order1, order2, order3]]) + assert result.output == sequence + '\n' @respx.mock @@ -83,8 +84,8 @@ def test_cli_orders_list_empty(invoke): respx.get(TEST_ORDERS_URL).return_value = mock_resp result = invoke(['list']) - assert not result.exception - assert [] == json.loads(result.output) + assert result.exit_code == 0 + assert result.output == '' @respx.mock @@ -104,8 +105,9 @@ def test_cli_orders_list_state(invoke, order_descriptions): # if the value of state doesn't get sent as a url parameter, # the mock will fail and this test will fail result = invoke(['list', '--state', 'failed']) - assert not result.exception - assert [order1, order2] == json.loads(result.output) + assert result.exit_code == 0 + sequence = '\n'.join([json.dumps(o) for o in [order1, order2]]) + assert result.output == sequence + '\n' @respx.mock @@ -137,8 +139,9 @@ def test_cli_orders_list_limit(invoke, respx.get(TEST_ORDERS_URL).return_value = mock_resp result = invoke(['list', '--limit', limit]) - assert not result.exception - assert len(json.loads(result.output)) == limited_list_length + assert result.exit_code == 0 + count = len(result.output.strip().split('\n')) + assert count == limited_list_length @respx.mock @@ -155,8 +158,8 @@ def test_cli_orders_list_pretty(invoke, monkeypatch, order_description): respx.get(TEST_ORDERS_URL).return_value = mock_resp result = invoke(['list', '--pretty']) - assert not result.exception - mock_echo_json.assert_called_once_with([order_description], True) + assert result.exit_code == 0 + mock_echo_json.assert_called_once_with(order_description, True) # TODO: add tests for "get --pretty" (gh-491).
planet orders list cli command should return sequence

Change the `planet orders list` cli command to return a sequence of json descriptions of the orders instead of a list. Something like:

```
async def list(ctx, state, limit, pretty):
    '''List orders

    This command prints a sequence of the returned order descriptions,
    optionally pretty-printed.
    '''
    async with orders_client(ctx) as cl:
        orders = await cl.list_orders(state=state, limit=limit)
        async for o in orders:
            echo_json(o, pretty)
        # orders_list = [o async for o in orders]
        # echo_json(orders_list, pretty)
```
0.0
a65a117ebba604a9c0e0c5dd2a95b5e519255ad3
[ "tests/integration/test_orders_cli.py::test_cli_orders_list_basic", "tests/integration/test_orders_cli.py::test_cli_orders_list_empty", "tests/integration/test_orders_cli.py::test_cli_orders_list_state", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[None-100]", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[0-102]", "tests/integration/test_orders_cli.py::test_cli_orders_list_pretty" ]
[ "tests/integration/test_orders_cli.py::test_split_list_arg_empty_string", "tests/integration/test_orders_cli.py::test_split_list_arg_None", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[1-1]", "tests/integration/test_orders_cli.py::test_cli_orders_get", "tests/integration/test_orders_cli.py::test_cli_orders_get_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_cancel", "tests/integration/test_orders_cli.py::test_cli_orders_cancel_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_wait_default", "tests/integration/test_orders_cli.py::test_cli_orders_wait_max_attempts", "tests/integration/test_orders_cli.py::test_cli_orders_download_default", "tests/integration/test_orders_cli.py::test_cli_orders_download_dest", "tests/integration/test_orders_cli.py::test_cli_orders_download_overwrite", "tests/integration/test_orders_cli.py::test_cli_orders_download_state", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_item_type_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_create_id_empty", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip_featureclass", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip_invalid_geometry", "tests/integration/test_orders_cli.py::test_cli_orders_create_clip_and_tools", "tests/integration/test_orders_cli.py::test_cli_orders_create_cloudconfig", "tests/integration/test_orders_cli.py::test_cli_orders_create_email", "tests/integration/test_orders_cli.py::test_cli_orders_create_tools", "tests/integration/test_orders_cli.py::test_cli_orders_read_file_json_doesnotexist", "tests/integration/test_orders_cli.py::test_cli_orders_read_file_json_invalidjson" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-06-03 22:29:22+00:00
apache-2.0
4586
planetlabs__planet-client-python-583
diff --git a/planet/clients/data.py b/planet/clients/data.py index a7b96ec..0dbe7e3 100644 --- a/planet/clients/data.py +++ b/planet/clients/data.py @@ -40,6 +40,12 @@ class Items(Paged): ITEMS_KEY = 'features' +class Searches(Paged): + """Asynchronous iterator over searches from a paged response.""" + NEXT_KEY = '_next' + ITEMS_KEY = 'searches' + + class DataClient: """Low-level asynchronous access to Planet's data API. @@ -212,12 +218,25 @@ class DataClient: Returns: Description of the saved search. """ - raise NotImplementedError + url = f'{self._searches_url()}/{search_id}' + + request_json = { + 'name': name, + 'filter': search_filter, + 'item_types': item_types, + '__daily_email_enabled': enable_email + } + + request = self._request(url, method='PUT', json=request_json) + response = await self._do_request(request) + return response.json() async def list_searches( self, sort: str = 'created desc', - search_type: str = 'any') -> typing.AsyncIterator[dict]: + search_type: str = 'any', + limit: typing.Union[int, + None] = 100) -> typing.AsyncIterator[dict]: """List all saved searches available to the authenticated user. NOTE: the term 'saved' is overloaded here. We want to list saved @@ -228,18 +247,29 @@ class DataClient: Parameters: sort: Field and direction to order results by. search_type: Search type filter. + limit: Maximum number of items to return. Returns: - List of saved searches that match filter. + An iterator over all searches that match filter. Raises: planet.exceptions.APIError: On API error. planet.exceptions.ClientError: If sort or search_type are not valid. """ - # NOTE: check sort and search_type args are in LIST_SORT_ORDER and - # LIST_SEARCH_TYPE, respectively - raise NotImplementedError + sort = sort.lower() + if sort not in LIST_SORT_ORDER: + raise exceptions.ClientError( + f'{sort} must be one of {LIST_SORT_ORDER}') + + search_type = search_type.lower() + if search_type not in LIST_SEARCH_TYPE: + raise exceptions.ClientError( + f'{search_type} must be one of {LIST_SEARCH_TYPE}') + + url = f'{self._searches_url()}' + request = self._request(url, method='GET') + return Searches(request, self._do_request, limit=limit) async def delete_search(self, search_id: str): """Delete an existing saved search. @@ -247,13 +277,13 @@ class DataClient: Parameters: search_id: Saved search identifier. - Returns: - Nothing. - Raises: planet.exceptions.APIError: On API error. """ - raise NotImplementedError + url = f'{self._searches_url()}/{search_id}' + + request = self._request(url, method='DELETE') + await self._do_request(request) async def get_search(self, search_id: str) -> dict: """Get a saved search by id. @@ -269,11 +299,16 @@ class DataClient: """ raise NotImplementedError - async def run_search(self, search_id: str) -> typing.AsyncIterator[dict]: + async def run_search( + self, + search_id: str, + limit: typing.Union[int, + None] = 100) -> typing.AsyncIterator[dict]: """Execute a saved search. Parameters: search_id: Stored search identifier. + limit: Maximum number of items to return. Returns: Returns an iterator over all items matching the search. @@ -281,7 +316,10 @@ class DataClient: Raises: planet.exceptions.APIError: On API error. """ - raise NotImplementedError + url = f'{self._searches_url()}/{search_id}/results' + + request = self._request(url, method='GET') + return Items(request, self._do_request, limit=limit) async def get_stats(self, item_types: typing.List[str], @@ -301,6 +339,7 @@ class DataClient: planet.exceptions.APIError: On API error. 
planet.exceptions.ClientError: If interval is not valid. """ + interval = interval.lower() if interval not in STATS_INTERVAL: raise exceptions.ClientError( f'{interval} must be one of {STATS_INTERVAL}')
planetlabs/planet-client-python
2e789e3abc771ec7e6f55b663f40d601de3bce77
diff --git a/tests/integration/test_data_api.py b/tests/integration/test_data_api.py index c94c6da..b4f4372 100644 --- a/tests/integration/test_data_api.py +++ b/tests/integration/test_data_api.py @@ -11,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. +from contextlib import nullcontext as does_not_raise from http import HTTPStatus import json import logging @@ -224,6 +225,149 @@ async def test_create_search_email(search_filter, session): assert search == page_response [email protected] [email protected] +async def test_update_search_basic(search_filter, session): + sid = 'search_id' + + page_response = { + "__daily_email_enabled": False, + "_links": { + "_self": "string", "thumbnail": "string" + }, + "created": "2019-08-24T14:15:22Z", + "filter": search_filter, + "id": sid, + "last_executed": "2019-08-24T14:15:22Z", + "name": "test", + "updated": "2019-08-24T14:15:22Z" + } + mock_resp = httpx.Response(HTTPStatus.OK, json=page_response) + respx.put(f'{TEST_SEARCHES_URL}/{sid}').return_value = mock_resp + + cl = DataClient(session, base_url=TEST_URL) + search = await cl.update_search(sid, 'test', ['PSScene'], search_filter) + + # check that request is correct + expected_request = { + "item_types": ["PSScene"], + "filter": search_filter, + "name": "test", + "__daily_email_enabled": False + } + actual_body = json.loads(respx.calls[0].request.content) + assert actual_body == expected_request + + # check the response is returned unaltered + assert search == page_response + + [email protected] [email protected] [email protected]("limit, expected_list_length", [(None, 4), (3, 3)]) +async def test_list_searches_success(limit, + expected_list_length, + search_result, + session): + page1_response = {"_links": {}, "searches": [search_result] * 4} + route = respx.get(TEST_SEARCHES_URL) + route.return_value = httpx.Response(200, json=page1_response) + + cl = DataClient(session, base_url=TEST_URL) + + searches = await cl.list_searches(limit=limit) + searches_list_length = len([s async for s in searches]) + assert searches_list_length == expected_list_length + + assert route.called + + [email protected] [email protected] [email protected]( + "sort, search_type, expectation", + [('DOESNOTEXIST', 'ANY', pytest.raises(exceptions.ClientError)), + ('CREATED DESC', 'DOESNOTEXIST', pytest.raises(exceptions.ClientError))]) +async def test_list_searches_args_do_not_match(sort, + search_type, + expectation, + session): + route = respx.get(TEST_SEARCHES_URL) + route.return_value = httpx.Response(200, json={}) + + cl = DataClient(session, base_url=TEST_URL) + + with expectation: + await cl.list_searches(sort=sort, search_type=search_type) + + assert not route.called + + [email protected] [email protected] [email protected]("retcode, expectation", + [(204, does_not_raise()), + (404, pytest.raises(exceptions.APIError))]) +async def test_delete_search(retcode, expectation, session): + sid = 'search_id' + mock_resp = httpx.Response(retcode) + route = respx.delete(f'{TEST_SEARCHES_URL}/{sid}') + route.return_value = mock_resp + cl = DataClient(session, base_url=TEST_URL) + + with expectation: + await cl.delete_search(sid) + + assert route.called + + [email protected] [email protected] +async def test_run_search_success(item_descriptions, session): + sid = 'search_id' + route = respx.get(f'{TEST_SEARCHES_URL}/{sid}/results') + + next_page_url = f'{TEST_URL}/blob/?page_marker=IAmATest' + 
item1, item2, item3 = item_descriptions + page1_response = { + "_links": { + "_next": next_page_url + }, "features": [item1, item2] + } + + route.return_value = httpx.Response(204, json=page1_response) + + page2_response = {"_links": {"_self": next_page_url}, "features": [item3]} + mock_resp2 = httpx.Response(HTTPStatus.OK, json=page2_response) + respx.get(next_page_url).return_value = mock_resp2 + + cl = DataClient(session, base_url=TEST_URL) + items = await cl.run_search(sid) + items_list = [i async for i in items] + + assert route.called + + # check that all of the items were returned unchanged + assert items_list == item_descriptions + + [email protected] [email protected] +async def test_run_search_doesnotexist(session): + sid = 'search_id' + route = respx.get(f'{TEST_SEARCHES_URL}/{sid}/results') + route.return_value = httpx.Response(404) + + cl = DataClient(session, base_url=TEST_URL) + with pytest.raises(exceptions.APIError): + items = await cl.run_search(sid) + # this won't throw the error until the iterator is processed + # issue 476 + [i async for i in items] + + assert route.called + + @respx.mock @pytest.mark.asyncio async def test_get_stats_success(search_filter, session):
Implement Data API search functions Implement the following Data API search functions: * list_searches * delete_search * get_search * update_search * run_search
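A rough usage sketch of four of these methods, following the signatures exercised by the tests in the patch above; the Session setup, the search id, and the empty filter are assumptions made for illustration.

```python
import asyncio
from planet import Session, DataClient

async def main():
    async with Session() as session:
        cl = DataClient(session)
        # Update an existing saved search; the empty filter is a placeholder.
        await cl.update_search('search_id', 'test', ['PSScene'], {})
        # List saved searches, newest first, up to the limit.
        searches = await cl.list_searches(sort='created desc', limit=100)
        async for s in searches:
            print(s['id'])
        # Execute a saved search and stream the matching items.
        items = await cl.run_search('search_id')
        async for item in items:
            print(item['id'])
        # Remove the saved search when done.
        await cl.delete_search('search_id')

asyncio.run(main())
```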
0.0
2e789e3abc771ec7e6f55b663f40d601de3bce77
[ "tests/integration/test_data_api.py::test_update_search_basic", "tests/integration/test_data_api.py::test_list_searches_success[None-4]", "tests/integration/test_data_api.py::test_list_searches_success[3-3]", "tests/integration/test_data_api.py::test_list_searches_args_do_not_match[DOESNOTEXIST-ANY-expectation0]", "tests/integration/test_data_api.py::test_list_searches_args_do_not_match[CREATED", "tests/integration/test_data_api.py::test_delete_search[204-expectation0]", "tests/integration/test_data_api.py::test_delete_search[404-expectation1]", "tests/integration/test_data_api.py::test_run_search_success", "tests/integration/test_data_api.py::test_run_search_doesnotexist" ]
[ "tests/integration/test_data_api.py::test_quick_search_basic", "tests/integration/test_data_api.py::test_quick_search_sort", "tests/integration/test_data_api.py::test_quick_search_limit", "tests/integration/test_data_api.py::test_create_search_basic", "tests/integration/test_data_api.py::test_create_search_email", "tests/integration/test_data_api.py::test_get_stats_success", "tests/integration/test_data_api.py::test_get_stats_invalid_interval" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-06-06 01:50:11+00:00
apache-2.0
4587
planetlabs__planet-client-python-621
diff --git a/planet/cli/data.py b/planet/cli/data.py index 2e4c633..c5dff65 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -18,6 +18,7 @@ from contextlib import asynccontextmanager import click from planet import data_filter, DataClient +from planet.clients.data import SEARCH_SORT, SEARCH_SORT_DEFAULT from . import types from .cmds import coro, translate_exceptions @@ -230,13 +231,15 @@ def filter(ctx, @coro @click.argument("item_types", type=types.CommaSeparatedString()) @click.argument("filter", type=types.JSON(), default="-", required=False) [email protected]('--name', - type=str, - default=False, - help=('Name of the saved search.')) @limit [email protected]('--name', type=str, help='Name of the saved search.') [email protected]('--sort', + type=click.Choice(SEARCH_SORT), + default=SEARCH_SORT_DEFAULT, + show_default=True, + help='Field and direction to order results by.') @pretty -async def search_quick(ctx, item_types, filter, name, limit, pretty): +async def search_quick(ctx, item_types, filter, limit, name, sort, pretty): """Execute a structured item search. This function outputs a series of GeoJSON descriptions, one for each of the @@ -251,11 +254,11 @@ async def search_quick(ctx, item_types, filter, name, limit, pretty): parameter will be applied to the stored quick search. """ async with data_client(ctx) as cl: - items = await cl.quick_search(name=name, - item_types=item_types, - search_filter=filter, - limit=limit, - sort=None) + items = await cl.quick_search(item_types, + filter, + name=name, + sort=sort, + limit=limit) async for item in items: echo_json(item, pretty) diff --git a/planet/clients/data.py b/planet/clients/data.py index 2b122e1..0197781 100644 --- a/planet/clients/data.py +++ b/planet/clients/data.py @@ -24,8 +24,14 @@ BASE_URL = f'{PLANET_BASE_URL}/data/v1/' SEARCHES_PATH = '/searches' STATS_PATH = '/stats' +# TODO: get these values from the spec directly gh-619 LIST_SORT_ORDER = ('created desc', 'created asc') LIST_SEARCH_TYPE = ('any', 'saved', 'quick') +SEARCH_SORT = ('published desc', + 'published asc', + 'acquired desc', + 'acquired asc') +SEARCH_SORT_DEFAULT = 'published desc' STATS_INTERVAL = ('hour', 'day', 'week', 'month', 'year') WAIT_DELAY = 5 @@ -102,8 +108,8 @@ class DataClient: """Execute a quick search. Quick searches are saved for a short period of time (~month). The - `name` parameter of the search defaults to the search id if `name` - is not given. + `name` parameter of the search defaults to the id of the generated + search id if `name` is not specified. Example: @@ -132,10 +138,8 @@ class DataClient: Parameters: item_types: The item types to include in the search. search_filter: Structured search criteria. - sort: Override default of 'published desc' for field and direction - to order results by. Specified as '<field> <direction>' where - direction is either 'desc' for descending direction or 'asc' - for ascending direction. + sort: Field and direction to order results by. Valid options are + given in SEARCH_SORT. name: The name of the saved search. limit: Maximum number of items to return. @@ -157,8 +161,11 @@ class DataClient: request_json['name'] = name params = {} - if sort: - # TODO: validate sort + if sort and sort != SEARCH_SORT_DEFAULT: + sort = sort.lower() + if sort not in SEARCH_SORT: + raise exceptions.ClientError( + f'{sort} must be one of {SEARCH_SORT}') params['sort'] = sort request = self._request(url,
planetlabs/planet-client-python
a2eadaac4f7163446599131deb6c67ad27e77af1
diff --git a/tests/integration/test_data_api.py b/tests/integration/test_data_api.py index a488165..3c02722 100644 --- a/tests/integration/test_data_api.py +++ b/tests/integration/test_data_api.py @@ -110,7 +110,7 @@ async def test_quick_search_sort(item_descriptions, search_response, session): - sort = 'created asc' + sort = 'acquired asc' quick_search_url = f'{TEST_URL}/quick-search?sort={sort}' item1, _, _ = item_descriptions diff --git a/tests/integration/test_data_cli.py b/tests/integration/test_data_cli.py index 4e7e4ec..cf10c75 100644 --- a/tests/integration/test_data_cli.py +++ b/tests/integration/test_data_cli.py @@ -401,6 +401,51 @@ def test_data_search_quick_filter_success(invoke, item_types): assert len(result.output.strip().split('\n')) == 1 # we have 1 feature [email protected] +def test_data_search_quick_sort_success(invoke): + # this cannot be the default value or else the sort param will not be + # added to the url + sort = 'published asc' + search_url = f'{TEST_QUICKSEARCH_URL}?sort={sort}' + + filter = { + "type": "DateRangeFilter", + "field_name": "acquired", + "config": { + "gt": "2019-12-31T00:00:00Z" + } + } + + feature = {"key": "value"} + mock_resp = httpx.Response(HTTPStatus.OK, json={'features': [feature]}) + respx.post(search_url).return_value = mock_resp + + runner = CliRunner() + result = invoke( + ['search-quick', 'PSScene', json.dumps(filter), f'--sort={sort}'], + runner=runner) + assert result.exit_code == 0 + assert json.loads(result.output) == feature + + [email protected] +def test_data_search_quick_sort_invalid(invoke): + filter = { + "type": "DateRangeFilter", + "field_name": "acquired", + "config": { + "gt": "2019-12-31T00:00:00Z" + } + } + + runner = CliRunner() + result = invoke( + ['search-quick', 'PSScene', json.dumps(filter), '--sort=invalid'], + runner=runner) + + assert result.exit_code == 2 + + @respx.mock @pytest.mark.parametrize("limit,limited_list_length", [(None, 100), (0, 102), (1, 1)])
Support sort in data CLI Right now the CLI doesn't support the 'sort' option. Let's get it in there.
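A short sketch of invoking the new option through click's test runner, modeled on the test added in the patch above; running it outside a mocked environment would hit the live API, so this is illustrative only.

```python
import json
from click.testing import CliRunner
from planet.cli import cli

search_filter = {
    "type": "DateRangeFilter",
    "field_name": "acquired",
    "config": {"gt": "2019-12-31T00:00:00Z"}
}

runner = CliRunner()
# 'published asc' is one of the sort choices wired up by this change.
result = runner.invoke(cli.main, [
    'data', 'search-quick', 'PSScene',
    json.dumps(search_filter), '--sort=published asc'
])
print(result.exit_code, result.output)
```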
0.0
a2eadaac4f7163446599131deb6c67ad27e77af1
[ "tests/integration/test_data_cli.py::test_data_search_quick_sort_success" ]
[ "tests/integration/test_data_api.py::test_quick_search_basic", "tests/integration/test_data_api.py::test_quick_search_sort", "tests/integration/test_data_api.py::test_quick_search_limit", "tests/integration/test_data_api.py::test_create_search_basic", "tests/integration/test_data_api.py::test_create_search_email", "tests/integration/test_data_api.py::test_get_search_success", "tests/integration/test_data_api.py::test_get_search_id_doesnt_exist", "tests/integration/test_data_api.py::test_update_search_basic", "tests/integration/test_data_api.py::test_list_searches_success[None-4]", "tests/integration/test_data_api.py::test_list_searches_success[3-3]", "tests/integration/test_data_api.py::test_list_searches_args_do_not_match[DOESNOTEXIST-ANY-expectation0]", "tests/integration/test_data_api.py::test_list_searches_args_do_not_match[CREATED", "tests/integration/test_data_api.py::test_delete_search[204-expectation0]", "tests/integration/test_data_api.py::test_delete_search[404-expectation1]", "tests/integration/test_data_api.py::test_run_search_success", "tests/integration/test_data_api.py::test_run_search_doesnotexist", "tests/integration/test_data_api.py::test_get_stats_success", "tests/integration/test_data_api.py::test_get_stats_invalid_interval", "tests/integration/test_data_cli.py::test_data_command_registered", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr-expected0]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr,ortho_analytic_4b_sr-expected1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr", "tests/integration/test_data_cli.py::test_data_filter_date_range_success", "tests/integration/test_data_cli.py::test_data_filter_date_range_invalid", "tests/integration/test_data_cli.py::test_data_filter_geom[geom_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[feature_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[featurecollection_geojson]", "tests/integration/test_data_cli.py::test_data_filter_number_in_success", "tests/integration/test_data_cli.py::test_data_filter_number_in_badparam", "tests/integration/test_data_cli.py::test_data_filter_range", "tests/integration/test_data_cli.py::test_data_filter_string_in", "tests/integration/test_data_cli.py::test_data_filter_update", "tests/integration/test_data_cli.py::test_data_search_quick_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_quick_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_quick_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_quick_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_quick_filter_invalid_json[item_types2-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_quick_filter_invalid_json[item_types2-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_quick_filter_success[PSScene]", 
"tests/integration/test_data_cli.py::test_data_search_quick_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_quick_filter_success[item_types2]", "tests/integration/test_data_cli.py::test_data_search_quick_sort_invalid", "tests/integration/test_data_cli.py::test_data_search_quick_limit[None-100]", "tests/integration/test_data_cli.py::test_data_search_quick_limit[0-102]", "tests/integration/test_data_cli.py::test_data_search_quick_limit[1-1]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[item_types2-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[item_types2-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[item_types2]", "tests/integration/test_data_cli.py::test_search_create_daily_email", "tests/integration/test_data_cli.py::test_search_get", "tests/integration/test_data_cli.py::test_search_get_id_not_found" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-06-29 22:54:05+00:00
apache-2.0
4588
planetlabs__planet-client-python-675
diff --git a/planet/cli/cli.py b/planet/cli/cli.py index 41cf057..d75eb33 100644 --- a/planet/cli/cli.py +++ b/planet/cli/cli.py @@ -27,20 +27,17 @@ LOGGER = logging.getLogger(__name__) @click.group() @click.pass_context [email protected]('--verbosity', - default="warning", - help=("Optional: set verbosity level to warning, info, or debug.\ - Defaults to warning.")) @click.option('--quiet', is_flag=True, default=False, help='Disable ANSI control output.') @click.version_option(version=planet.__version__) [email protected]('--verbosity', + default="warning", + help=("Optional: set verbosity level to warning, info, or debug.\ + Defaults to warning.")) def main(ctx, verbosity, quiet): - """Planet API Client - Parameters: - ctx -- context object - verbosity -- user input for verbosity.""" + """Planet SDK for Python CLI""" _configure_logging(verbosity) # ensure that ctx.obj exists and is a dict (in case `cli()` is called diff --git a/planet/cli/data.py b/planet/cli/data.py index 5f13850..1abdcde 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -19,6 +19,7 @@ import click from planet import data_filter, DataClient from planet.clients.data import SEARCH_SORT, SEARCH_SORT_DEFAULT, STATS_INTERVAL +from planet.specs import get_item_types from . import types from .cmds import coro, translate_exceptions @@ -26,6 +27,9 @@ from .io import echo_json from .options import limit, pretty from .session import CliSession +ALL_ITEM_TYPES = get_item_types() +valid_item_string = "Valid entries for ITEM_TYPES: " + "|".join(ALL_ITEM_TYPES) + @asynccontextmanager async def data_client(ctx): @@ -43,7 +47,7 @@ async def data_client(ctx): default=None, help='Assign custom base Orders API URL.') def data(ctx, base_url): - '''Commands for interacting with the Orders API''' + '''Commands for interacting with the Data API''' ctx.obj['AUTH'] = None ctx.obj['BASE_URL'] = base_url @@ -58,6 +62,17 @@ def assets_to_filter(ctx, param, assets: List[str]) -> Optional[dict]: return data_filter.asset_filter(assets) if assets else None +def check_item_types(ctx, param, item_types) -> Optional[List[dict]]: + # Set difference between given item types and all item types + set_diff = set([item.lower() for item in item_types]) - set( + [a.lower() for a in ALL_ITEM_TYPES]) + if set_diff: + raise click.BadParameter( + f'{item_types} should be one of {ALL_ITEM_TYPES}') + else: + return item_types + + def date_range_to_filter(ctx, param, values) -> Optional[List[dict]]: def _func(obj): @@ -226,11 +241,13 @@ def filter(ctx, echo_json(filt, pretty) [email protected]() [email protected](epilog=valid_item_string) @click.pass_context @translate_exceptions @coro [email protected]("item_types", type=types.CommaSeparatedString()) [email protected]("item_types", + type=types.CommaSeparatedString(), + callback=check_item_types) @click.argument("filter", type=types.JSON(), default="-", required=False) @limit @click.option('--name', type=str, help='Name of the saved search.') @@ -264,12 +281,14 @@ async def search(ctx, item_types, filter, limit, name, sort, pretty): echo_json(item, pretty) [email protected]() [email protected](epilog=valid_item_string) @click.pass_context @translate_exceptions @coro @click.argument('name') [email protected]("item_types", type=types.CommaSeparatedString()) [email protected]("item_types", + type=types.CommaSeparatedString(), + callback=check_item_types) @click.argument("filter", type=types.JSON(), default="-", required=False) @click.option('--daily-email', is_flag=True, @@ -296,11 +315,13 @@ async def 
search_create(ctx, name, item_types, filter, daily_email, pretty): echo_json(items, pretty) [email protected]() [email protected](epilog=valid_item_string) @click.pass_context @translate_exceptions @coro [email protected]("item_types", type=types.CommaSeparatedString()) [email protected]("item_types", + type=types.CommaSeparatedString(), + callback=check_item_types) @click.argument('interval', type=click.Choice(STATS_INTERVAL)) @click.argument("filter", type=types.JSON(), default="-", required=False) async def stats(ctx, item_types, interval, filter): diff --git a/planet/cli/subscriptions.py b/planet/cli/subscriptions.py index 3f465a0..95b7f9e 100644 --- a/planet/cli/subscriptions.py +++ b/planet/cli/subscriptions.py @@ -13,6 +13,7 @@ from planet.clients.subscriptions import SubscriptionsClient @click.group() @click.pass_context def subscriptions(ctx): + '''Commands for interacting with the Subscriptions API''' # None means that order of precedence is 1) environment variable, # 2) secret file. ctx.obj['AUTH'] = None diff --git a/planet/specs.py b/planet/specs.py index f3971ab..0f8c155 100644 --- a/planet/specs.py +++ b/planet/specs.py @@ -93,10 +93,10 @@ def get_match(test_entry, spec_entries): is hard to remember but must be exact otherwise the API throws an exception.''' try: - match = next(t for t in spec_entries - if t.lower() == test_entry.lower()) + match = next(e for e in spec_entries + if e.lower() == test_entry.lower()) except (StopIteration): - raise NoMatchException + raise NoMatchException('{test_entry} should be one of {spec_entries}') return match @@ -107,10 +107,19 @@ def get_product_bundles(): return spec['bundles'].keys() -def get_item_types(product_bundle): - '''Get item types supported by Orders API for the given product bundle.''' +def get_item_types(product_bundle=None): + '''If given product bundle, get specific item types supported by Orders + API. Otherwise, get all item types supported by Orders API.''' spec = _get_product_bundle_spec() - return spec['bundles'][product_bundle]['assets'].keys() + if product_bundle: + item_types = spec['bundles'][product_bundle]['assets'].keys() + else: + product_bundle = get_product_bundles() + all_item_types = [] + for bundle in product_bundle: + all_item_types += [*spec['bundles'][bundle]['assets'].keys()] + item_types = set(all_item_types) + return item_types def _get_product_bundle_spec():
planetlabs/planet-client-python
eba7ed475c12dd7a75bf40ea79b0cb73ca66e964
diff --git a/tests/integration/test_data_cli.py b/tests/integration/test_data_cli.py index 60a1a64..f146cf0 100644 --- a/tests/integration/test_data_cli.py +++ b/tests/integration/test_data_cli.py @@ -23,6 +23,7 @@ from click.testing import CliRunner import pytest from planet.cli import cli +from planet.specs import get_item_types LOGGER = logging.getLogger(__name__) @@ -55,6 +56,23 @@ def test_data_command_registered(invoke): # Add other sub-commands here. +def test_data_search_command_registered(invoke): + """planet-data search command prints help and usage message.""" + runner = CliRunner() + result = invoke(["search", "--help"], runner=runner) + all_item_types = [a for a in get_item_types()] + assert result.exit_code == 0 + assert "Usage" in result.output + assert "limit" in result.output + assert "name" in result.output + assert "sort" in result.output + assert "pretty" in result.output + assert "help" in result.output + for a in all_item_types: + assert a in result.output.replace('\n', '').replace(' ', '') + # Add other sub-commands here. + + PERMISSION_FILTER = {"type": "PermissionFilter", "config": ["assets:download"]} STD_QUALITY_FILTER = { "type": "StringInFilter", @@ -358,8 +376,8 @@ def test_data_filter_update(invoke, assert_and_filters_equal, default_filters): @respx.mock @pytest.mark.asyncio @pytest.mark.parametrize("filter", ['{1:1}', '{"foo"}']) [email protected]( - "item_types", ['PSScene', 'SkySatScene', ('PSScene', 'SkySatScene')]) [email protected]("item_types", + ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) def test_data_search_cmd_filter_invalid_json(invoke, item_types, filter): """Test for planet data search_quick. Test with multiple item_types. Test should fail as filter does not contain valid JSON.""" @@ -375,8 +393,8 @@ def test_data_search_cmd_filter_invalid_json(invoke, item_types, filter): @respx.mock [email protected]( - "item_types", ['PSScene', 'SkySatScene', ('PSScene', 'SkySatScene')]) [email protected]("item_types", + ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) def test_data_search_cmd_filter_success(invoke, item_types): """Test for planet data search_quick. Test with multiple item_types. Test should succeed as filter contains valid JSON.""" @@ -495,8 +513,8 @@ def test_data_search_cmd_limit(invoke, @respx.mock @pytest.mark.asyncio @pytest.mark.parametrize("filter", ['{1:1}', '{"foo"}']) [email protected]( - "item_types", ['PSScene', 'SkySatScene', ('PSScene', 'SkySatScene')]) [email protected]("item_types", + ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) def test_data_search_create_filter_invalid_json(invoke, item_types, filter): """Test for planet data search_create. Test with multiple item_types. Test should fail as filter does not contain valid JSON.""" @@ -514,8 +532,8 @@ def test_data_search_create_filter_invalid_json(invoke, item_types, filter): @respx.mock [email protected]( - "item_types", ['PSScene', 'SkySatScene', ('PSScene', 'SkySatScene')]) [email protected]("item_types", + ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) def test_data_search_create_filter_success(invoke, item_types): """Test for planet data search_create. Test with multiple item_types. 
Test should succeed as filter contains valid JSON.""" @@ -601,8 +619,8 @@ def test_data_stats_invalid_filter(invoke, filter): @respx.mock [email protected]( - "item_types", ['PSScene', 'SkySatScene', ('PSScene', 'SkySatScene')]) [email protected]("item_types", + ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) @pytest.mark.parametrize("interval, exit_code", [(None, 1), ('hou', 2), ('hour', 0)]) def test_data_stats_invalid_interval(invoke, item_types, interval, exit_code): @@ -630,8 +648,8 @@ def test_data_stats_invalid_interval(invoke, item_types, interval, exit_code): @respx.mock [email protected]( - "item_types", ['PSScene', 'SkySatScene', ('PSScene', 'SkySatScene')]) [email protected]("item_types", + ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) @pytest.mark.parametrize("interval", ['hour', 'day', 'week', 'month', 'year']) def test_data_stats_success(invoke, item_types, interval): """Test for planet data stats. Test with multiple item_types. diff --git a/tests/unit/test_data_item_type.py b/tests/unit/test_data_item_type.py new file mode 100644 index 0000000..41d7b78 --- /dev/null +++ b/tests/unit/test_data_item_type.py @@ -0,0 +1,55 @@ +# Copyright 2022 Planet Labs PBC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import pytest +import click +from planet.cli.data import check_item_types + +LOGGER = logging.getLogger(__name__) + + +class MockContext: + + def __init__(self): + self.obj = {} + + [email protected]("item_types", + [ + 'PSScene3Band', + 'MOD09GQ', + 'MYD09GA', + 'REOrthoTile', + 'SkySatCollect', + 'SkySatScene', + 'MYD09GQ', + 'Landsat8L1G', + 'Sentinel2L1C', + 'MOD09GA', + 'Sentinel1', + 'PSScene', + 'PSOrthoTile', + 'PSScene4Band', + 'REScene' + ]) +def test_item_type_success(item_types): + ctx = MockContext() + result = check_item_types(ctx, 'item_types', [item_types]) + assert result == [item_types] + + +def test_item_type_fail(): + ctx = MockContext() + with pytest.raises(click.BadParameter): + check_item_types(ctx, 'item_type', "bad_item_type") diff --git a/tests/unit/test_specs.py b/tests/unit/test_specs.py index 6f721e9..21b4e57 100644 --- a/tests/unit/test_specs.py +++ b/tests/unit/test_specs.py @@ -23,6 +23,23 @@ LOGGER = logging.getLogger(__name__) TEST_PRODUCT_BUNDLE = 'visual' # must be a valid item type for TEST_PRODUCT_BUNDLE TEST_ITEM_TYPE = 'PSScene' +ALL_ITEM_TYPES = [ + 'PSOrthoTile', + 'Sentinel1', + 'REOrthoTile', + 'PSScene', + 'PSScene4Band', + 'Landsat8L1G', + 'PSScene3Band', + 'REScene', + 'MOD09GA', + 'MYD09GA', + 'MOD09GQ', + 'SkySatCollect', + 'Sentinel2L1C', + 'MYD09GQ', + 'SkySatScene' +] def test_get_type_match(): @@ -90,6 +107,12 @@ def test_get_product_bundles(): assert TEST_PRODUCT_BUNDLE in bundles -def test_get_item_types(): - item_types = specs.get_item_types(TEST_PRODUCT_BUNDLE) +def test_get_item_types_with_bundle(): + item_types = specs.get_item_types(product_bundle=TEST_PRODUCT_BUNDLE) assert TEST_ITEM_TYPE in item_types + + +def test_get_item_types_without_bundle(): + item_types = specs.get_item_types() + for 
item in item_types: + assert item in ALL_ITEM_TYPES
Improve default CLI help The message that pops up when you first try `planet` can use a few improvements: ``` planet --help Usage: planet [OPTIONS] COMMAND [ARGS]... Planet API Client Parameters: ctx -- context object verbosity -- user input for verbosity. Options: --verbosity TEXT Optional: set verbosity level to warning, info, or debug. Defaults to warning. --quiet Disable ANSI control output. --version Show the version and exit. --help Show this message and exit. Commands: auth Commands for working with Planet authentication collect Collect a sequence of JSON descriptions into a single... data Commands for interacting with the Orders API orders Commands for interacting with the Orders API subscriptions ``` - [x] The third line is a bit strange; it seems to be about the API? `Planet API Client Parameters: ctx -- context object verbosity -- user input for verbosity.` I'd say remove it, unless there's something it's getting at that I don't know about. - [x] Maybe put verbosity last in the 'Options', as it seems the least likely to be used. - [x] `data` says 'Commands for interacting with the Orders API'. It should say Data API. - [x] subscriptions has no description.
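For reference, the items called out above are controlled by click conventions: the group's docstring becomes the help header, and options are listed in declaration order. A standalone sketch (not the planet code) demonstrating both levers:

```python
import click

@click.group()
@click.option('--quiet', is_flag=True, help='Disable ANSI control output.')
@click.option('--verbosity', default='warning',
              help='Set verbosity to warning, info, or debug.')
def main(quiet, verbosity):
    """Planet SDK for Python CLI"""
    # The first docstring line is shown under "Usage" in --help;
    # options appear in --help in the order they are declared above.

@main.command()
def subscriptions():
    """Commands for interacting with the Subscriptions API"""

if __name__ == '__main__':
    main()
```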
0.0
eba7ed475c12dd7a75bf40ea79b0cb73ca66e964
[ "tests/integration/test_data_cli.py::test_data_command_registered", "tests/integration/test_data_cli.py::test_data_search_command_registered", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr-expected0]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr,ortho_analytic_4b_sr-expected1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr", "tests/integration/test_data_cli.py::test_data_filter_date_range_success", "tests/integration/test_data_cli.py::test_data_filter_date_range_invalid", "tests/integration/test_data_cli.py::test_data_filter_geom[geom_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[feature_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[featurecollection_geojson]", "tests/integration/test_data_cli.py::test_data_filter_number_in_success", "tests/integration/test_data_cli.py::test_data_filter_number_in_badparam", "tests/integration/test_data_cli.py::test_data_filter_range", "tests/integration/test_data_cli.py::test_data_filter_string_in", "tests/integration/test_data_cli.py::test_data_filter_update", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_invalid", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[None-100]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[0-102]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[1-1]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_search_create_daily_email", 
"tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{1:1}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene,", "tests/integration/test_data_cli.py::test_search_get", "tests/integration/test_data_cli.py::test_search_get_id_not_found", "tests/unit/test_data_item_type.py::test_item_type_success[PSScene3Band]", "tests/unit/test_data_item_type.py::test_item_type_success[MOD09GQ]", "tests/unit/test_data_item_type.py::test_item_type_success[MYD09GA]", "tests/unit/test_data_item_type.py::test_item_type_success[REOrthoTile]", "tests/unit/test_data_item_type.py::test_item_type_success[SkySatCollect]", "tests/unit/test_data_item_type.py::test_item_type_success[SkySatScene]", "tests/unit/test_data_item_type.py::test_item_type_success[MYD09GQ]", "tests/unit/test_data_item_type.py::test_item_type_success[Landsat8L1G]", "tests/unit/test_data_item_type.py::test_item_type_success[Sentinel2L1C]", "tests/unit/test_data_item_type.py::test_item_type_success[MOD09GA]", "tests/unit/test_data_item_type.py::test_item_type_success[Sentinel1]", "tests/unit/test_data_item_type.py::test_item_type_success[PSScene]", "tests/unit/test_data_item_type.py::test_item_type_success[PSOrthoTile]", "tests/unit/test_data_item_type.py::test_item_type_success[PSScene4Band]", "tests/unit/test_data_item_type.py::test_item_type_success[REScene]", "tests/unit/test_data_item_type.py::test_item_type_fail", "tests/unit/test_specs.py::test_get_type_match", "tests/unit/test_specs.py::test_validate_bundle_supported", 
"tests/unit/test_specs.py::test_validate_bundle_notsupported", "tests/unit/test_specs.py::test_validate_item_type_supported", "tests/unit/test_specs.py::test_validate_item_type_notsupported_bundle", "tests/unit/test_specs.py::test_validate_item_type_notsupported_itemtype", "tests/unit/test_specs.py::test_validate_order_type_supported", "tests/unit/test_specs.py::test_validate_order_type_notsupported", "tests/unit/test_specs.py::test_validate_arhive_type_supported", "tests/unit/test_specs.py::test_validate_arhive_type_notsupported", "tests/unit/test_specs.py::test_validate_file_format_supported", "tests/unit/test_specs.py::test_validate_file_format_notsupported", "tests/unit/test_specs.py::test_get_product_bundles", "tests/unit/test_specs.py::test_get_item_types_with_bundle", "tests/unit/test_specs.py::test_get_item_types_without_bundle" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-08-10 14:48:23+00:00
apache-2.0
4589
planetlabs__planet-client-python-677
diff --git a/planet/specs.py b/planet/specs.py index 0f8c155..9c08a0b 100644 --- a/planet/specs.py +++ b/planet/specs.py @@ -15,7 +15,7 @@ """Functionality for validating against the Planet API specification.""" import json import logging - +import itertools from .constants import DATA_DIR PRODUCT_BUNDLE_SPEC_NAME = 'orders_product_bundle_2022_02_02.json' @@ -37,19 +37,32 @@ SUPPORTED_FILE_FORMATS = ['COG', 'PL_NITF'] LOGGER = logging.getLogger(__name__) -class SpecificationException(Exception): +class NoMatchException(Exception): '''No match was found''' pass +class SpecificationException(Exception): + '''No match was found''' + + def __init__(self, value, supported, field_name): + self.value = value + self.supported = supported + self.field_name = field_name + self.opts = ', '.join(["'" + s + "'" for s in supported]) + + def __str__(self): + return f'{self.field_name} - \'{self.value}\' is not one of {self.opts}.' + + def validate_bundle(bundle): supported = get_product_bundles() return _validate_field(bundle, supported, 'product_bundle') def validate_item_type(item_type, bundle): - bundle = validate_bundle(bundle) - supported = get_item_types(bundle) + validated_bundle = validate_bundle(bundle) + supported = get_item_types(validated_bundle) return _validate_field(item_type, supported, 'item_type') @@ -73,20 +86,13 @@ def validate_file_format(file_format): def _validate_field(value, supported, field_name): try: - value = get_match(value, supported) + value = get_match(value, supported, field_name) except (NoMatchException): - opts = ', '.join(["'" + s + "'" for s in supported]) - msg = f'{field_name} - \'{value}\' is not one of {opts}.' - raise SpecificationException(msg) + raise SpecificationException(value, supported, field_name) return value -class NoMatchException(Exception): - '''No match was found''' - pass - - -def get_match(test_entry, spec_entries): +def get_match(test_entry, spec_entries, field_name): '''Find and return matching spec entry regardless of capitalization. This is helpful for working with the API spec, where the capitalization @@ -96,7 +102,7 @@ def get_match(test_entry, spec_entries): match = next(e for e in spec_entries if e.lower() == test_entry.lower()) except (StopIteration): - raise NoMatchException('{test_entry} should be one of {spec_entries}') + raise SpecificationException(test_entry, spec_entries, field_name) return match @@ -111,14 +117,15 @@ def get_item_types(product_bundle=None): '''If given product bundle, get specific item types supported by Orders API. Otherwise, get all item types supported by Orders API.''' spec = _get_product_bundle_spec() + if product_bundle: - item_types = spec['bundles'][product_bundle]['assets'].keys() + item_types = set(spec['bundles'][product_bundle]['assets'].keys()) else: - product_bundle = get_product_bundles() - all_item_types = [] - for bundle in product_bundle: - all_item_types += [*spec['bundles'][bundle]['assets'].keys()] - item_types = set(all_item_types) + item_types = set( + itertools.chain.from_iterable( + spec['bundles'][bundle]['assets'].keys() + for bundle in get_product_bundles())) + return item_types
planetlabs/planet-client-python
87c8b0a3f26c94c38eba84894f73268666eafc6d
diff --git a/tests/unit/test_specs.py b/tests/unit/test_specs.py index 21b4e57..e7c17bf 100644 --- a/tests/unit/test_specs.py +++ b/tests/unit/test_specs.py @@ -46,10 +46,11 @@ def test_get_type_match(): spec_list = ['Locket', 'drop', 'DEER'] test_entry = 'locket' - assert 'Locket' == specs.get_match(test_entry, spec_list) + field_name = 'field_name' + assert 'Locket' == specs.get_match(test_entry, spec_list, field_name) - with pytest.raises(specs.NoMatchException): - specs.get_match('a', ['b']) + with pytest.raises(specs.SpecificationException): + specs.get_match('a', ['b'], field_name) def test_validate_bundle_supported():
planet order request: be more helpful about which bundles are available for a given item_type Right now `planet order request` fails when item_type and bundle are incompatible by saying which item-types are compatible with the specified bundle. It would be more helpful to switch this around so that it indicates which bundles are compatible with the given item-type. This has been hitting me as I've changed over from the `PSScene4Band` item-type to `PSScene`, which is not compatible with the `analytic` bundle.
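One possible inverse lookup, sketched on top of the spec helpers the patches above already use (get_product_bundles and the bundle-aware get_item_types); this is an illustration, not how the library builds its error message.

```python
from planet.specs import get_product_bundles, get_item_types

def bundles_for_item_type(item_type):
    """Return the product bundles compatible with a given item type."""
    return [bundle for bundle in get_product_bundles()
            if item_type.lower() in
            {i.lower() for i in get_item_types(bundle)}]

print(bundles_for_item_type('PSScene'))
```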
0.0
87c8b0a3f26c94c38eba84894f73268666eafc6d
[ "tests/unit/test_specs.py::test_get_type_match" ]
[ "tests/unit/test_specs.py::test_validate_bundle_supported", "tests/unit/test_specs.py::test_validate_bundle_notsupported", "tests/unit/test_specs.py::test_validate_item_type_supported", "tests/unit/test_specs.py::test_validate_item_type_notsupported_bundle", "tests/unit/test_specs.py::test_validate_item_type_notsupported_itemtype", "tests/unit/test_specs.py::test_validate_order_type_supported", "tests/unit/test_specs.py::test_validate_order_type_notsupported", "tests/unit/test_specs.py::test_validate_arhive_type_supported", "tests/unit/test_specs.py::test_validate_arhive_type_notsupported", "tests/unit/test_specs.py::test_validate_file_format_supported", "tests/unit/test_specs.py::test_validate_file_format_notsupported", "tests/unit/test_specs.py::test_get_product_bundles", "tests/unit/test_specs.py::test_get_item_types_with_bundle", "tests/unit/test_specs.py::test_get_item_types_without_bundle" ]
{ "failed_lite_validators": [ "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2022-08-18 14:50:04+00:00
apache-2.0
4590
planetlabs__planet-client-python-695
diff --git a/planet/cli/data.py b/planet/cli/data.py index 1abdcde..5588c6d 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -19,7 +19,7 @@ import click from planet import data_filter, DataClient from planet.clients.data import SEARCH_SORT, SEARCH_SORT_DEFAULT, STATS_INTERVAL -from planet.specs import get_item_types +from planet.specs import get_item_types, SpecificationException from . import types from .cmds import coro, translate_exceptions @@ -64,11 +64,12 @@ def assets_to_filter(ctx, param, assets: List[str]) -> Optional[dict]: def check_item_types(ctx, param, item_types) -> Optional[List[dict]]: # Set difference between given item types and all item types - set_diff = set([item.lower() for item in item_types]) - set( + invalid_item_types = set([item.lower() for item in item_types]) - set( [a.lower() for a in ALL_ITEM_TYPES]) - if set_diff: - raise click.BadParameter( - f'{item_types} should be one of {ALL_ITEM_TYPES}') + if invalid_item_types: + raise SpecificationException(invalid_item_types, + ALL_ITEM_TYPES, + 'item_type') else: return item_types
planetlabs/planet-client-python
6db89633fa14f05e71e8304e479923b0e2cb34bd
diff --git a/tests/unit/test_data_item_type.py b/tests/unit/test_data_item_type.py index 41d7b78..a777609 100644 --- a/tests/unit/test_data_item_type.py +++ b/tests/unit/test_data_item_type.py @@ -13,7 +13,7 @@ # limitations under the License. import logging import pytest -import click +from planet.specs import SpecificationException from planet.cli.data import check_item_types LOGGER = logging.getLogger(__name__) @@ -51,5 +51,5 @@ def test_item_type_success(item_types): def test_item_type_fail(): ctx = MockContext() - with pytest.raises(click.BadParameter): + with pytest.raises(SpecificationException): check_item_types(ctx, 'item_type', "bad_item_type")
Report invalid item-types with more human-readable list With `planet data search` if I do an improper item-type I get a response like: ``` Error: Invalid value for 'ITEM_TYPES': ['psscend'] should be one of {'PSScene', 'MOD09GA', 'MYD09GA', 'MYD09GQ', 'SkySatCollect', 'REOrthoTile', 'REScene', 'MOD09GQ', 'PSScene4Band', 'Sentinel1', 'Sentinel2L1C', 'SkySatScene', 'Landsat8L1G', 'PSOrthoTile', 'PSScene3Band'} ``` The { and ' characters make it feel like a dump of a program. It'd be better to have it look like a more natural list. `planet orders request` does it in a more readable way, so likely can just copy theirs: ``` Error: Invalid value: item_type - 'skysatcollectd' is not one of 'PSScene', 'Landsat8L1G', 'SkySatScene', 'SkySatCollect', 'PSOrthoTile', 'REOrthoTile', 'PSScene3Band', 'Sentinel2L1C'. ```
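The requested formatting boils down to quoting and comma-joining the supported values before interpolating them, which is what SpecificationException.__str__ does in the patch above; a self-contained illustration with example values:

```python
def format_not_one_of(field_name, value, supported):
    """Render "field - 'value' is not one of 'a', 'b'." style messages."""
    opts = ', '.join("'" + s + "'" for s in supported)
    return f"{field_name} - '{value}' is not one of {opts}."

print(format_not_one_of('item_type', 'psscend', ['PSScene', 'SkySatScene']))
# item_type - 'psscend' is not one of 'PSScene', 'SkySatScene'.
```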
0.0
6db89633fa14f05e71e8304e479923b0e2cb34bd
[ "tests/unit/test_data_item_type.py::test_item_type_fail" ]
[ "tests/unit/test_data_item_type.py::test_item_type_success[PSScene3Band]", "tests/unit/test_data_item_type.py::test_item_type_success[MOD09GQ]", "tests/unit/test_data_item_type.py::test_item_type_success[MYD09GA]", "tests/unit/test_data_item_type.py::test_item_type_success[REOrthoTile]", "tests/unit/test_data_item_type.py::test_item_type_success[SkySatCollect]", "tests/unit/test_data_item_type.py::test_item_type_success[SkySatScene]", "tests/unit/test_data_item_type.py::test_item_type_success[MYD09GQ]", "tests/unit/test_data_item_type.py::test_item_type_success[Landsat8L1G]", "tests/unit/test_data_item_type.py::test_item_type_success[Sentinel2L1C]", "tests/unit/test_data_item_type.py::test_item_type_success[MOD09GA]", "tests/unit/test_data_item_type.py::test_item_type_success[Sentinel1]", "tests/unit/test_data_item_type.py::test_item_type_success[PSScene]", "tests/unit/test_data_item_type.py::test_item_type_success[PSOrthoTile]", "tests/unit/test_data_item_type.py::test_item_type_success[PSScene4Band]", "tests/unit/test_data_item_type.py::test_item_type_success[REScene]" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-09-16 22:13:57+00:00
apache-2.0
4591
planetlabs__planet-client-python-713
diff --git a/planet/cli/data.py b/planet/cli/data.py index e45dbad..6ce15ee 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -21,7 +21,9 @@ from planet import data_filter, DataClient from planet.clients.data import (SEARCH_SORT, SEARCH_SORT_DEFAULT, STATS_INTERVAL) -from planet.specs import get_item_types, SpecificationException +from planet.specs import (get_item_types, + validate_item_type, + SpecificationException) from . import types from .cmds import coro, translate_exceptions @@ -65,15 +67,14 @@ def assets_to_filter(ctx, param, assets: List[str]) -> Optional[dict]: def check_item_types(ctx, param, item_types) -> Optional[List[dict]]: - # Set difference between given item types and all item types - invalid_item_types = set([item.lower() for item in item_types]) - set( - [a.lower() for a in ALL_ITEM_TYPES]) - if invalid_item_types: - raise SpecificationException(invalid_item_types, - ALL_ITEM_TYPES, - 'item_type') - else: + '''Validates the item type by comparing the inputted item type to all + supported item types.''' + try: + for item_type in item_types: + validate_item_type(item_type) return item_types + except SpecificationException as e: + raise click.BadParameter(e) def date_range_to_filter(ctx, param, values) -> Optional[List[dict]]:
planetlabs/planet-client-python
3aa079918b098e9ff6bae346f64d8bd87db5a9d7
diff --git a/tests/integration/test_orders_cli.py b/tests/integration/test_orders_cli.py index 3610ad9..59909c8 100644 --- a/tests/integration/test_orders_cli.py +++ b/tests/integration/test_orders_cli.py @@ -491,7 +491,8 @@ def test_cli_orders_request_item_type_invalid(invoke): '--id=4500474_2133707_2021-05-20_2419', ]) assert result.exit_code == 2 - assert "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" in result.output + error_msg = "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" + assert error_msg in result.output def test_cli_orders_request_product_bundle_invalid(invoke): @@ -503,7 +504,8 @@ def test_cli_orders_request_product_bundle_invalid(invoke): '--id=4500474_2133707_2021-05-20_2419', ]) assert result.exit_code == 2 - assert "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" in result.output + error_msg = "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" + assert error_msg in result.output def test_cli_orders_request_product_bundle_incompatible(invoke): @@ -515,7 +517,8 @@ def test_cli_orders_request_product_bundle_incompatible(invoke): '--id=4500474_2133707_2021-05-20_2419', ]) assert result.exit_code == 2 - assert "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" in result.output + error_msg = "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" + assert error_msg in result.output def test_cli_orders_request_id_empty(invoke): diff --git a/tests/unit/test_data_callbacks.py b/tests/unit/test_data_callbacks.py index a777609..41d7b78 100644 --- a/tests/unit/test_data_callbacks.py +++ b/tests/unit/test_data_callbacks.py @@ -13,7 +13,7 @@ # limitations under the License. import logging import pytest -from planet.specs import SpecificationException +import click from planet.cli.data import check_item_types LOGGER = logging.getLogger(__name__) @@ -51,5 +51,5 @@ def test_item_type_success(item_types): def test_item_type_fail(): ctx = MockContext() - with pytest.raises(SpecificationException): + with pytest.raises(click.BadParameter): check_item_types(ctx, 'item_type', "bad_item_type")
Report invalid item-types with more human-readable list With `planet data search` if I do an improper item-type I get a response like: ``` Error: Invalid value for 'ITEM_TYPES': ['psscend'] should be one of {'PSScene', 'MOD09GA', 'MYD09GA', 'MYD09GQ', 'SkySatCollect', 'REOrthoTile', 'REScene', 'MOD09GQ', 'PSScene4Band', 'Sentinel1', 'Sentinel2L1C', 'SkySatScene', 'Landsat8L1G', 'PSOrthoTile', 'PSScene3Band'} ``` The { and ' characters make it feel like a dump of a program. It'd be better to have it look like a more natural list. `planet orders request` does it in a more readable way, so likely can just copy theirs: ``` Error: Invalid value: item_type - 'skysatcollectd' is not one of 'PSScene', 'Landsat8L1G', 'SkySatScene', 'SkySatCollect', 'PSOrthoTile', 'REOrthoTile', 'PSScene3Band', 'Sentinel2L1C'. ```
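This later fix funnels validation through the shared spec check and re-raises as a click error so the friendly message reaches the CLI; a condensed sketch of that callback pattern, assuming the one-argument validate_item_type form the patch above imports:

```python
import click
from planet.specs import validate_item_type, SpecificationException

def check_item_types(ctx, param, item_types):
    """Click callback: reject unknown item types with a readable message."""
    try:
        for item_type in item_types:
            validate_item_type(item_type)
        return item_types
    except SpecificationException as e:
        # click surfaces this to the user as an "Invalid value" error.
        raise click.BadParameter(str(e))
```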
0.0
3aa079918b098e9ff6bae346f64d8bd87db5a9d7
[ "tests/unit/test_data_callbacks.py::test_item_type_fail" ]
[ "tests/integration/test_orders_cli.py::test_cli_orders_list_basic", "tests/integration/test_orders_cli.py::test_cli_orders_list_empty", "tests/integration/test_orders_cli.py::test_cli_orders_list_state", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[None-100]", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[0-102]", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[1-1]", "tests/integration/test_orders_cli.py::test_cli_orders_list_pretty", "tests/integration/test_orders_cli.py::test_cli_orders_get", "tests/integration/test_orders_cli.py::test_cli_orders_get_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_cancel", "tests/integration/test_orders_cli.py::test_cli_orders_cancel_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_wait_default", "tests/integration/test_orders_cli.py::test_cli_orders_wait_max_attempts", "tests/integration/test_orders_cli.py::test_cli_orders_download_default", "tests/integration/test_orders_cli.py::test_cli_orders_download_checksum", "tests/integration/test_orders_cli.py::test_cli_orders_download_dest", "tests/integration/test_orders_cli.py::test_cli_orders_download_overwrite", "tests/integration/test_orders_cli.py::test_cli_orders_download_state", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_request_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", "tests/integration/test_orders_cli.py::test_cli_orders_request_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_request_item_type_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_request_product_bundle_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_request_product_bundle_incompatible", "tests/integration/test_orders_cli.py::test_cli_orders_request_id_empty", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[geom_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[feature_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[featurecollection_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_invalid_geometry", "tests/integration/test_orders_cli.py::test_cli_orders_request_both_clip_and_tools", "tests/integration/test_orders_cli.py::test_cli_orders_request_cloudconfig", "tests/integration/test_orders_cli.py::test_cli_orders_request_email", "tests/integration/test_orders_cli.py::test_cli_orders_request_tools", "tests/integration/test_orders_cli.py::test_cli_orders_request_no_stac", "tests/unit/test_data_callbacks.py::test_item_type_success[PSScene3Band]", "tests/unit/test_data_callbacks.py::test_item_type_success[MOD09GQ]", "tests/unit/test_data_callbacks.py::test_item_type_success[MYD09GA]", "tests/unit/test_data_callbacks.py::test_item_type_success[REOrthoTile]", "tests/unit/test_data_callbacks.py::test_item_type_success[SkySatCollect]", "tests/unit/test_data_callbacks.py::test_item_type_success[SkySatScene]", "tests/unit/test_data_callbacks.py::test_item_type_success[MYD09GQ]", "tests/unit/test_data_callbacks.py::test_item_type_success[Landsat8L1G]", 
"tests/unit/test_data_callbacks.py::test_item_type_success[Sentinel2L1C]", "tests/unit/test_data_callbacks.py::test_item_type_success[MOD09GA]", "tests/unit/test_data_callbacks.py::test_item_type_success[Sentinel1]", "tests/unit/test_data_callbacks.py::test_item_type_success[PSScene]", "tests/unit/test_data_callbacks.py::test_item_type_success[PSOrthoTile]", "tests/unit/test_data_callbacks.py::test_item_type_success[PSScene4Band]", "tests/unit/test_data_callbacks.py::test_item_type_success[REScene]" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-10-12 18:01:06+00:00
apache-2.0
4,592
planetlabs__planet-client-python-768
diff --git a/planet/clients/orders.py b/planet/clients/orders.py index e405046..733e96f 100644 --- a/planet/clients/orders.py +++ b/planet/clients/orders.py @@ -481,15 +481,14 @@ class OrdersClient: planet.exceptions.ClientError: If state is not valid. """ url = self._orders_url() + params = {"source_type": "all"} if state: if state not in ORDER_STATE_SEQUENCE: raise exceptions.ClientError( f'Order state ({state}) is not a valid state. ' f'Valid states are {ORDER_STATE_SEQUENCE}') - params = {"state": state} - else: - params = None + params['state'] = state response = await self._session.request(method='GET', url=url,
planetlabs/planet-client-python
8a458acfd4eb6e7eff5dbe275773ac84185424a3
diff --git a/tests/integration/test_orders_api.py b/tests/integration/test_orders_api.py index d7ddab2..bb1a63e 100644 --- a/tests/integration/test_orders_api.py +++ b/tests/integration/test_orders_api.py @@ -123,7 +123,7 @@ async def test_list_orders_basic(order_descriptions, session): @respx.mock @pytest.mark.asyncio async def test_list_orders_state(order_descriptions, session): - list_url = TEST_ORDERS_URL + '?state=failed' + list_url = TEST_ORDERS_URL + '?source_type=all&state=failed' order1, order2, _ = order_descriptions diff --git a/tests/integration/test_orders_cli.py b/tests/integration/test_orders_cli.py index 59909c8..16c7a9f 100644 --- a/tests/integration/test_orders_cli.py +++ b/tests/integration/test_orders_cli.py @@ -85,7 +85,7 @@ def test_cli_orders_list_empty(invoke): @respx.mock def test_cli_orders_list_state(invoke, order_descriptions): - list_url = TEST_ORDERS_URL + '?state=failed' + list_url = TEST_ORDERS_URL + '?source_type=all&state=failed' order1, order2, _ = order_descriptions
Include basemaps orders in `planet orders list` output

**Is your feature request related to a problem? Please describe.**
Planet now supports [basemaps in orders](https://developers.planet.com/apis/orders/basemaps/). This works great with all the core orders commands except for `list`. You can `get` a basemaps order, `wait` on it, and `download` it.

**Describe the solution you'd like**
To have `planet orders list` return both basemaps and scene orders. The core API team wanted to keep everything backwards compatible, so they kept the GET at https://api.planet.com/compute/ops/orders/v2/ returning just the scene orders. They introduced a parameter called `source_type` that can take values like `all`, `basemaps`, `scenes`, and `basemaps,scenes`. I propose we just request `all`: the SDK and CLI output the JSON directly, so returning an extra JSON attribute doesn't affect anything. Our `list` should aim to show all the orders at the endpoint.

**Describe alternatives you've considered**
We could consider supporting the `source_type` parameter as an option, with a default of `all`. But that could also be ticketed as its own issue.

**Additional context**
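A hedged sketch of the parameter handling the patch introduces: always send `source_type=all` so basemaps orders come back, and only add `state` when one is given. The helper name `list_params` and the state values below are illustrative; the real change lives inside `OrdersClient.list_orders`.

```python
# Illustrative helper; the real logic sits inside OrdersClient.list_orders.
from typing import Optional

# Assumed stand-in for the SDK's ORDER_STATE_SEQUENCE values.
ORDER_STATE_SEQUENCE = ('queued', 'running', 'success', 'failed', 'cancelled')


def list_params(state: Optional[str] = None) -> dict:
    params = {'source_type': 'all'}  # include basemaps and scene orders
    if state:
        if state not in ORDER_STATE_SEQUENCE:
            raise ValueError(f'Order state ({state}) is not a valid state.')
        params['state'] = state
    return params


assert list_params() == {'source_type': 'all'}
assert list_params('failed') == {'source_type': 'all', 'state': 'failed'}
```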
0.0
8a458acfd4eb6e7eff5dbe275773ac84185424a3
[ "tests/integration/test_orders_api.py::test_list_orders_state", "tests/integration/test_orders_cli.py::test_cli_orders_list_state" ]
[ "[", "tests/integration/test_orders_api.py::test_OrderStates_reached", "tests/integration/test_orders_api.py::test_OrderStates_passed", "tests/integration/test_orders_api.py::test_list_orders_basic", "tests/integration/test_orders_api.py::test_list_orders_state_invalid_state", "tests/integration/test_orders_api.py::test_list_orders_limit[None-100]", "tests/integration/test_orders_api.py::test_list_orders_limit[0-102]", "tests/integration/test_orders_api.py::test_list_orders_limit[1-1]", "tests/integration/test_orders_api.py::test_create_order_basic", "tests/integration/test_orders_api.py::test_create_order_bad_item_type", "tests/integration/test_orders_api.py::test_create_order_item_id_does_not_exist", "tests/integration/test_orders_api.py::test_get_order", "tests/integration/test_orders_api.py::test_get_order_invalid_id", "tests/integration/test_orders_api.py::test_get_order_id_doesnt_exist", "tests/integration/test_orders_api.py::test_cancel_order", "tests/integration/test_orders_api.py::test_cancel_order_invalid_id", "tests/integration/test_orders_api.py::test_cancel_order_id_doesnt_exist", "tests/integration/test_orders_api.py::test_cancel_order_id_cannot_be_cancelled", "tests/integration/test_orders_api.py::test_cancel_orders_by_ids", "tests/integration/test_orders_api.py::test_cancel_orders_by_ids_invalid_id", "tests/integration/test_orders_api.py::test_cancel_orders_all", "tests/integration/test_orders_api.py::test_wait_default", "tests/integration/test_orders_api.py::test_wait_callback", "tests/integration/test_orders_api.py::test_wait_state", "tests/integration/test_orders_api.py::test_wait_max_attempts_enabled", "tests/integration/test_orders_api.py::test_wait_max_attempts_disabled", "tests/integration/test_orders_api.py::test_wait_invalid_oid", "tests/integration/test_orders_api.py::test_wait_invalid_state", "tests/integration/test_orders_api.py::test_aggegated_order_stats", "tests/integration/test_orders_api.py::test_download_asset_md", "tests/integration/test_orders_api.py::test_download_asset_img", "tests/integration/test_orders_api.py::test_validate_checksum_checksum[1-expectation0-MD5]", "tests/integration/test_orders_api.py::test_validate_checksum_checksum[1-expectation0-SHA256]", "tests/integration/test_orders_api.py::test_validate_checksum_checksum[1-expectation1-MD5]", "tests/integration/test_orders_api.py::test_validate_checksum_checksum[1-expectation1-SHA256]", "tests/integration/test_orders_api.py::test_validate_checksum_checksum[does", "tests/integration/test_orders_api.py::test_validate_checksum_manifest[True-False-expectation0]", "tests/integration/test_orders_api.py::test_validate_checksum_manifest[True-True-expectation1]", "tests/integration/test_orders_api.py::test_validate_checksum_manifest[False-False-expectation2]", "tests/integration/test_orders_api.py::test_download_order_success[None-paths0]", "tests/integration/test_orders_api.py::test_download_order_success[results1-paths1]", "tests/integration/test_orders_api.py::test_download_order_success[results2-paths2]", "tests/integration/test_orders_api.py::test_download_order_state", "tests/integration/test_orders_api.py::test_download_order_overwrite_true_preexisting_data", "tests/integration/test_orders_api.py::test_download_order_overwrite_false_preexisting_data", "tests/integration/test_orders_api.py::test_download_order_overwrite_true_nonexisting_data", "tests/integration/test_orders_api.py::test_download_order_overwrite_false_nonexisting_data", 
"tests/integration/test_orders_cli.py::test_cli_orders_list_basic", "tests/integration/test_orders_cli.py::test_cli_orders_list_empty", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[None-100]", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[0-102]", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[1-1]", "tests/integration/test_orders_cli.py::test_cli_orders_list_pretty", "tests/integration/test_orders_cli.py::test_cli_orders_get", "tests/integration/test_orders_cli.py::test_cli_orders_get_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_cancel", "tests/integration/test_orders_cli.py::test_cli_orders_cancel_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_wait_default", "tests/integration/test_orders_cli.py::test_cli_orders_wait_max_attempts", "tests/integration/test_orders_cli.py::test_cli_orders_download_default", "tests/integration/test_orders_cli.py::test_cli_orders_download_checksum", "tests/integration/test_orders_cli.py::test_cli_orders_download_dest", "tests/integration/test_orders_cli.py::test_cli_orders_download_overwrite", "tests/integration/test_orders_cli.py::test_cli_orders_download_state", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_request_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", "tests/integration/test_orders_cli.py::test_cli_orders_request_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_request_item_type_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_request_product_bundle_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_request_product_bundle_incompatible", "tests/integration/test_orders_cli.py::test_cli_orders_request_id_empty", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[geom_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[feature_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[featurecollection_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_invalid_geometry", "tests/integration/test_orders_cli.py::test_cli_orders_request_both_clip_and_tools", "tests/integration/test_orders_cli.py::test_cli_orders_request_cloudconfig", "tests/integration/test_orders_cli.py::test_cli_orders_request_email", "tests/integration/test_orders_cli.py::test_cli_orders_request_tools", "tests/integration/test_orders_cli.py::test_cli_orders_request_no_stac" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
2022-11-14 23:04:46+00:00
apache-2.0
4,593
planetlabs__planet-client-python-780
diff --git a/docs/cli/cli-orders.md b/docs/cli/cli-orders.md index 7804dff..f2561c7 100644 --- a/docs/cli/cli-orders.md +++ b/docs/cli/cli-orders.md @@ -467,7 +467,23 @@ curl -s https://raw.githubusercontent.com/planetlabs/planet-client-python/main/d ### Harmonize -TODO +The harmonize tool allows you to compare data to different generations of satellites by radiometrically harmonizing imagery captured by one satellite instrument type to imagery captured by another. To harmonize your data to a sensor you must define the sensor you wish to harmonize with in your `tools.json`. Currently, only "PS2" (Dove Classic) and "Sentinel-2" are supported as target sensors. The Sentinel-2 target only harmonizes PSScene surface reflectance bundle types (`analytic_8b_sr_udm2`, `analytic_sr_udm2`). The PS2 target only works on analytic bundles from Dove-R (`PS2.SD`). + +```json +[ + { + "harmonize": { + "target_sensor": "Sentinel-2" + } + } +] +``` + +You may create an order request by calling `tools.json` with `--tools`. + +```console +planet orders request psscene analytic_sr_udm2 --name "Harmonized data" --id 20200925_161029_69_2223 --tools tools.json +``` ### STAC Metadata diff --git a/docs/resources/index.md b/docs/resources/index.md index 335931d..8da0223 100644 --- a/docs/resources/index.md +++ b/docs/resources/index.md @@ -2,26 +2,45 @@ title: Resources --- -## This Build +## Examples -[Planet](https://planet.com) Software Development Kit (SDK) for Python [![Build Status](https://travis-ci.org/planetlabs/planet-client-python.svg?branch=master)](https://travis-ci.org/planetlabs/planet-client-python) +### SDK examples +The following examples were created specifically to show you how to use the SDK and CLI: + +* [download_multiple_assets.py](https://github.com/planetlabs/planet-client-python/blob/main/examples/download_multiple_assets.py) - this Python script orders, activates, and downloads two assets +* [orders_create_and_download_multiple_orders.py](https://github.com/planetlabs/planet-client-python/blob/main/examples/orders_create_and_download_multiple_orders.py) - this Python script creates two orders, each with unique Area of Interest (AoI), preventing a combined download +* [Planet API Python Client](https://github.com/planetlabs/notebooks/blob/master/jupyter-notebooks/data-api-tutorials/planet_python_client_introduction.ipynb) - a Python notebook to introduce Planet’s Data API and the `planet` module +* [Orders API & Planet SDK](https://github.com/planetlabs/notebooks/blob/665f165e59f2c3584976ad2dde569c649e631c0b/jupyter-notebooks/orders_api_tutorials/Planet_SDK_Orders_demo.ipynb) - a Python notebook shows how to get started with Planet SDK and the Orders API. +* [Analysis Ready Data Tutorial Part 1: Introduction and Best Practices](https://github.com/planetlabs/notebooks/blob/6cc220ff6db246353af4798be219ee1fe7e858b0/jupyter-notebooks/analysis-ready-data/ard_1_intro_and_best_practices.ipynb) - this Python notebook uses the SDK to prepare Analysis Ready Data. +* [Analysis Ready Data Tutorial Part 2](https://github.com/planetlabs/notebooks/blob/6cc220ff6db246353af4798be219ee1fe7e858b0/jupyter-notebooks/analysis-ready-data/ard_2_use_case_1.ipynb) - the first use case in this Python notebook leverages the SDK’s `order_request` feature to prepare an NDVI time stack and the second use case visualizes the NDVI imagery. 
+ +### Other examples + +Besides the SDK-specific examples, above, you can find many examples that show how to access Planet data in the documentation and Planet School at the [Planet Developers Center](https://developers.planet.com).Also, more working examples are on the [Planet Labs Python notebooks](https://github.com/planetlabs/notebooks) on GitHub. ## Planet APIs -## Planet Tutorials +This pre-release SDK has implemented interfaces for several Planet APIs. Check out the documentation for the underlying API: -## Contact Support +* [Data](https://developers.planet.com/docs/apis/data/) +* [Orders](https://developers.planet.com/apis/orders/) +* [Subscriptions](https://developers.planet.com/docs/subscriptions/) ## Email Developer Relations +We are eager to share this pre-release with you and encourage you to test your workflows rigorously. Based on your feedback, we may roll out additional updates to improve your experience. Besides joining the discussion, and filing issues and pull requests here, feel free to share your general feedback with us at [email protected]. +## Contribute to this open source project + +To contribute or develop with this library, see +[CONTRIBUTING](https://github.com/planetlabs/planet-client-python/blob/main/CONTRIBUTING.md). + ## Build Status +[Planet Software Development Kit (SDK) for Python main branch](https://github.com/planetlabs/planet-client-python) + [![Build Status](https://travis-ci.org/planetlabs/planet-client-python.svg?branch=master)](https://travis-ci.org/planetlabs/planet-client-python) -[Planet](https://planet.com) Software Development Kit (SDK) for Python. +## Version 1 of this SDK -## Contribute to this open source project - -To contribute or develop with this library, see -[CONTRIBUTING](https://github.com/planetlabs/planet-client-python/CONTRIBUTING.md). +[Version 1 of this SDK](https://github.com/planetlabs/planet-client-python/tree/1.5.2) is significantly different (see the [documentation](https://planet-sdk-for-python.readthedocs.io/en/latest/)). Version 2 is not backward compatible. Make sure to create a separate virtual environment if you need to work with both versions. For more information on how to do this, see the [Virtual Environments and the Planet SDK for Python](https://planet-sdk-for-python-v2.readthedocs.io/en/latest/get-started/venv-tutorial/). \ No newline at end of file diff --git a/planet/order_request.py b/planet/order_request.py index 34b4cbb..586c44f 100644 --- a/planet/order_request.py +++ b/planet/order_request.py @@ -429,10 +429,12 @@ def toar_tool(scale_factor: Optional[int] = None, ) -> dict: return _tool('toar', parameters) -def harmonize_tool() -> dict: +def harmonize_tool(target_sensor: str) -> dict: '''Create the API spec representation of a harmonize tool. - Currently, only "PS2" (Dove Classic) is supported as a target sensor, and - it will transform only items captured by “PS2.SD” (Dove-R). + Currently, only "PS2" (Dove Classic) and "Sentinel-2" are supported as + target sensors. The Sentinel-2 target only harmonizes PSScene + surface reflectance bundle types (analytic_8b_sr_udm2, analytic_sr_udm2). + The PS2 target only works on analytic bundles from Dove-R (PS2.SD). ''' - return _tool('harmonize', {'target_sensor': 'PS2'}) + return _tool('harmonize', {'target_sensor': target_sensor})
planetlabs/planet-client-python
ccc2953528b741c91c6fa7b51e1745f9ed5ab934
diff --git a/tests/unit/test_order_request.py b/tests/unit/test_order_request.py index 0163357..49e3554 100644 --- a/tests/unit/test_order_request.py +++ b/tests/unit/test_order_request.py @@ -251,3 +251,10 @@ def test_toar_tool(): tt_empty = order_request.toar_tool() expected_empty = {'toar': {}} assert tt_empty == expected_empty + + [email protected]("target_sensor", ["PS2", "Sentinel-2"]) +def test_harmonization_tool(target_sensor): + ht = order_request.harmonize_tool(target_sensor) + expected = {'harmonize': {'target_sensor': target_sensor}} + assert ht == expected
Add Sentinel-2 to Harmonization tool

**Is your feature request related to a problem? Please describe.**
Our SDK only allows data harmonization with Dove Classic; however, the [OpenAPI spec](https://developers.planet.com/apis/orders/reference/#tag/Orders/operation/createOrder) has added Sentinel-2 as a target sensor for harmonization. I propose we add Sentinel-2 as an option for harmonization in the Orders API.

**Describe the solution you'd like**
Current [harmonization tool](https://github.com/planetlabs/planet-client-python/blob/main/planet/order_request.py#L432):

```
def harmonize_tool() -> dict:
    '''Create the API spec representation of a harmonize tool.

    Currently, only "PS2" (Dove Classic) is supported as a target sensor, and
    it will transform only items captured by “PS2.SD” (Dove-R).
    '''
    return _tool('harmonize', {'target_sensor': 'PS2'})
```

Proposed solution:

```
def harmonize_tool(target_sensor: str) -> dict:
    '''Create the API spec representation of a harmonize tool.

    Currently, only "PS2" (Dove Classic) and "Sentinel-2" are supported as
    target sensors. Dove-R can only be transformed to PS2, and Sentinel-2
    can only transform surface reflectance bundles for PSScene
    (i.e., analytic_8b_sr_udm2, analytic_sr_udm2).
    '''
    return _tool('harmonize', {'target_sensor': target_sensor})
```

I also suggest adding a test for harmonization in `tests/unit/test_order_request.py`:

```
@pytest.mark.parametrize("target_sensor", ["PS2", "Sentinel-2"])
def test_harmonization_tool(target_sensor):
    ht = order_request.harmonize_tool(target_sensor)
    expected = {'harmonize': {'target_sensor': target_sensor}}
    assert ht == expected
```

**Additional context**
I would double-check the sensor harmonization compatibility on the [Dev Center](https://developers.planet.com/apis/orders/tools/#harmonization).
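For a self-contained view of the proposed change, here is a runnable sketch with `_tool` inlined for illustration (in the SDK it is a private helper in `planet.order_request`):

```python
def _tool(name: str, parameters: dict) -> dict:
    """Simplified stand-in for planet.order_request._tool."""
    return {name: parameters}


def harmonize_tool(target_sensor: str) -> dict:
    """Create the API spec representation of a harmonize tool."""
    return _tool('harmonize', {'target_sensor': target_sensor})


print(harmonize_tool('Sentinel-2'))
# {'harmonize': {'target_sensor': 'Sentinel-2'}}
```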
0.0
ccc2953528b741c91c6fa7b51e1745f9ed5ab934
[ "tests/unit/test_order_request.py::test_harmonization_tool[PS2]", "tests/unit/test_order_request.py::test_harmonization_tool[Sentinel-2]" ]
[ "tests/unit/test_order_request.py::test_build_request", "tests/unit/test_order_request.py::test_product", "tests/unit/test_order_request.py::test_notifications", "tests/unit/test_order_request.py::test_delivery", "tests/unit/test_order_request.py::test_amazon_s3", "tests/unit/test_order_request.py::test_azure_blob_storage", "tests/unit/test_order_request.py::test_google_cloud_storage", "tests/unit/test_order_request.py::test_google_earth_engine", "tests/unit/test_order_request.py::test__tool", "tests/unit/test_order_request.py::test_clip_tool", "tests/unit/test_order_request.py::test_reproject_tool", "tests/unit/test_order_request.py::test_tile_tool", "tests/unit/test_order_request.py::test_toar_tool" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2022-11-17 23:00:28+00:00
apache-2.0
4,594
planetlabs__planet-client-python-805
diff --git a/docs/cli/cli-orders.md b/docs/cli/cli-orders.md index 444994f..ab47780 100644 --- a/docs/cli/cli-orders.md +++ b/docs/cli/cli-orders.md @@ -107,19 +107,19 @@ If you don't have access to PlanetScope data then replace PSScene with SkySatCol Then make the following call: ```console -planet orders request PSScene visual --name 'My First Order' --id 20220605_124027_64_242b +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'My First Order' 20220605_124027_64_242b ``` Running the above command should output the JSON needed to create an order: ```json -{"name": "My First Order", "products": [{"item_ids": ["20220605_124027_64_242b"], "item_type": "PSScene", "product_bundle": "analytic_sr_udm2"}]} +{"name": "My First Order", "products": [{"item_ids": ["20220605_124027_64_242b"], "item_type": "PSScene", "product_bundle": "analytic_sr_udm2"}], "metadata": {"stac": {}}} ``` You can also use `jq` here to make it a bit more readable: ```console -planet orders request PSScene analytic_sr_udm2 --name 'My First Order' --id 20220605_124027_64_242b | jq +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'My First Order' 20220605_124027_64_242b | jq ``` ```json @@ -133,7 +133,10 @@ planet orders request PSScene analytic_sr_udm2 --name 'My First Order' --id 2022 "item_type": "PSScene", "product_bundle": "analytic_sr_udm2" } - ] + ], + "metadata": { + "stac": {} + } } ``` @@ -143,7 +146,7 @@ The above command just prints out the necessary JSON to create an order. To actu save the output into a file: ```console -planet orders request PSScene analytic_sr_udm2 --name "My First Order" --id 20220605_124027_64_242b \ +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name "My First Order" 20220605_124027_64_242b \ > request-1.json ``` @@ -200,8 +203,8 @@ passing the output of the `orders request` command directly to be the input of t command: ```console -planet orders request PSScene analytic_sr_udm2 --name 'Two Item Order' \ ---id 20220605_124027_64_242b,20220605_124025_34_242b | planet orders create - +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'Two Item Order' \ +20220605_124027_64_242b,20220605_124025_34_242b | planet orders create - ``` The Planet CLI is designed to work well with piping, as it aims at small commands that can be @@ -357,8 +360,8 @@ You can move that geometry to your current directory and use the following comma tweak the geometry.geojson to refer to where you downloaded it. ```console -planet orders request PSScene analytic_sr_udm2 --clip geometry.geojson --name clipped-geom \ - --id 20220605_124027_64_242b | planet orders create - +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --clip geometry.geojson --name clipped-geom \ + 20220605_124027_64_242b | planet orders create - ``` ### Additional Tools @@ -406,8 +409,8 @@ Example: `tools.json` Ordering two scenes is easy, just add another id: ```console -planet orders request PSScene analytic_sr_udm2 --name 'Two Scenes' \ - --id 20220605_124027_64_242b,20220605_124025_34_242b | planet orders create - +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'Two Scenes' \ + 20220605_124027_64_242b,20220605_124025_34_242b | planet orders create - ``` And then you can composite them together, using the 'tools' json. You can @@ -426,8 +429,8 @@ Once you've got it saved you call the `--tools` flag to refer to the JSON file, can pipe that to `orders create`. 
```console -planet orders request PSScene analytic_sr_udm2 --name 'Two Scenes Composited' \ ---id 20220605_124027_64_242b,20220605_124025_34_242b --no-stac --tools tools-composite.json | planet orders create - +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'Two Scenes Composited' \ + 20220605_124027_64_242b,20220605_124025_34_242b --no-stac --tools tools-composite.json | planet orders create - ``` Note that we add the `--no-stac` option as [STAC Metadata](#stac-metadata) is not yet supported by the composite @@ -452,8 +455,8 @@ as COG in the file format tool. The following command just shows the output with [tools-cog.json](https://raw.githubusercontent.com/planetlabs/planet-client-python/main/docs/cli/request-json/tools-cog.json): ```console -planet orders request PSScene analytic_sr_udm2 --name 'COG Order' \ - --id 20220605_124027_64_242b,20220605_124025_34_242b --tools tools-cog.json +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'COG Order' \ + 20220605_124027_64_242b,20220605_124025_34_242b --tools tools-cog.json ``` As shown above you can also pipe that output directly in to `orders create`. @@ -504,16 +507,16 @@ so you can just use the [following json](https://raw.githubusercontent.com/plane ``` ```console -planet orders request PSScene analytic_sr_udm2 --no-stac --name 'Two Scenes Clipped and Composited' \ - --id 20220605_124027_64_242b,20220605_124025_34_242b --tools tools-clip-composite.json | planet orders create - +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --no-stac --name 'Two Scenes Clipped and Composited' \ + 20220605_124027_64_242b,20220605_124025_34_242b --tools tools-clip-composite.json | planet orders create - ``` One cool little trick is that you can even stream in the JSON directly with `curl`, piping it into the request: ```console curl -s https://raw.githubusercontent.com/planetlabs/planet-client-python/main/docs/cli/request-json/tools-clip-composite.json \ -| planet orders request PSScene analytic_sr_udm2 --name 'Streaming Clip & Composite' \ - --id 20220605_124027_64_242b,20220605_124025_34_242b --tools - | planet orders create - +| planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'Streaming Clip & Composite' \ + 20220605_124027_64_242b,20220605_124025_34_242b --tools - | planet orders create - ``` ### Harmonize @@ -533,7 +536,7 @@ The harmonize tool allows you to compare data to different generations of satell You may create an order request by calling [`tools-harmonize.json`](https://raw.githubusercontent.com/planetlabs/planet-client-python/main/docs/cli/request-json/tools-harmonize.json) with `--tools`. 
```console -planet orders request psscene analytic_sr_udm2 --name 'Harmonized data' --id 20200925_161029_69_2223 --tools tools-harmonize.json +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'Harmonized data' 20200925_161029_69_2223 --tools tools-harmonize.json ``` ## More options @@ -668,9 +671,9 @@ image that was published: ```console -planet orders request SkySatCollect analytic --name 'SkySat Latest' \ ---id `planet data filter | planet data search SkySatCollect --sort 'acquired desc' --limit 1 - | jq -r .id` \ -| planet orders create - +planet orders request --item-type SkySatCollect --bundle analytic --name 'SkySat Latest' \ + `planet data filter | planet data search SkySatCollect --sort 'acquired desc' --limit 1 - | jq -r .id` \ +| planet orders create - ``` Or get the 5 latest cloud free images in an area and create an order that clips to that area, using @@ -679,8 +682,8 @@ Or get the 5 latest cloud free images in an area and create an order that clips ```console ids=`planet data filter --geom geometry.geojson --range clear_percent gt 90 | planet data \ search PSScene --limit 5 - | jq -r .id | tr '\n' , | sed 's/.$//'` -planet orders request PSScene analytic_sr_udm2 --name 'Clipped Scenes' \ ---id $ids --clip geometry.geojson | planet orders create - +planet orders request --item-type PSScene --bundle analytic_sr_udm2 --name 'Clipped Scenes' \ + $ids --clip geometry.geojson | planet orders create - ``` This one uses some advanced unix capabilities like `sed` and `tr`, along with unix variables, so more diff --git a/planet/cli/orders.py b/planet/cli/orders.py index b126a4d..998168e 100644 --- a/planet/cli/orders.py +++ b/planet/cli/orders.py @@ -225,22 +225,21 @@ async def create(ctx, request: str, pretty): @click.pass_context @translate_exceptions @coro [email protected]('item_type', - metavar='ITEM_TYPE', - type=click.Choice(planet.specs.get_item_types(), - case_sensitive=False)) [email protected]('bundle', - metavar='BUNDLE', - type=click.Choice(planet.specs.get_product_bundles(), - case_sensitive=False)) [email protected]('ids', metavar='IDS', type=types.CommaSeparatedString()) [email protected]('--item-type', + required=True, + help='Item type for requested item ids.', + type=click.Choice(planet.specs.get_item_types(), + case_sensitive=False)) [email protected]('--bundle', + required=True, + help='Asset type for the item.', + type=click.Choice(planet.specs.get_product_bundles(), + case_sensitive=False)) @click.option('--name', required=True, help='Order name. Does not need to be unique.', type=click.STRING) [email protected]('--id', - help='One or more comma-separated item IDs.', - type=types.CommaSeparatedString(), - required=True) @click.option('--clip', type=types.JSON(), help="""Clip feature GeoJSON. Can be a json string, filename, @@ -270,7 +269,7 @@ async def request(ctx, item_type, bundle, name, - id, + ids, clip, tools, email, @@ -280,11 +279,13 @@ async def request(ctx, """Generate an order request. This command provides support for building an order description used - in creating an order. It outputs the order request, optionally pretty- - printed. + in creating an order. It outputs the order request, optionally + pretty-printed. + + IDs is one or more comma-separated item IDs. """ try: - product = planet.order_request.product(id, bundle, item_type) + product = planet.order_request.product(ids, bundle, item_type) except planet.specs.SpecificationException as e: raise click.BadParameter(e)
planetlabs/planet-client-python
8d67d237c07606a1fd3f76abb96e518def506b54
diff --git a/tests/integration/test_orders_cli.py b/tests/integration/test_orders_cli.py index 16c7a9f..4cfda87 100644 --- a/tests/integration/test_orders_cli.py +++ b/tests/integration/test_orders_cli.py @@ -461,10 +461,10 @@ def test_cli_orders_request_basic_success(expected_ids, stac_json): result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - f'--id={id_string}', + id_string, ]) assert not result.exception @@ -485,45 +485,44 @@ def test_cli_orders_request_basic_success(expected_ids, def test_cli_orders_request_item_type_invalid(invoke): result = invoke([ 'request', - 'invalid' - 'analytic', + '--item-type=invalid' + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', ]) assert result.exit_code == 2 - error_msg = "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" - assert error_msg in result.output def test_cli_orders_request_product_bundle_invalid(invoke): result = invoke([ 'request', - 'PSScene' - 'invalid', + '--item-type=PSScene' + '--bundle=invalid', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', ]) assert result.exit_code == 2 - error_msg = "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" - assert error_msg in result.output def test_cli_orders_request_product_bundle_incompatible(invoke): result = invoke([ 'request', - 'PSScene', - 'analytic', + '--item-type=PSScene', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', ]) assert result.exit_code == 2 - error_msg = "Usage: main orders request [OPTIONS] ITEM_TYPE BUNDLE" - assert error_msg in result.output def test_cli_orders_request_id_empty(invoke): - result = invoke( - ['request', 'PSOrthoTile', 'analytic', '--name=test', '--id=']) + result = invoke([ + 'request', + '--item-type=PSOrthoTile', + '--bundle=analytic', + '--name=test', + '' + ]) assert result.exit_code == 2 assert 'Entry cannot be an empty string.' 
in result.output @@ -541,10 +540,10 @@ def test_cli_orders_request_clip_success(geom_fixture, result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', f'--clip={json.dumps(geom)}', ]) assert result.exit_code == 0 @@ -571,10 +570,10 @@ def test_cli_orders_request_clip_success(geom_fixture, def test_cli_orders_request_clip_invalid_geometry(invoke, point_geom_geojson): result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', f'--clip={json.dumps(point_geom_geojson)}' ]) assert result.exit_code == 2 @@ -588,10 +587,10 @@ def test_cli_orders_request_both_clip_and_tools(invoke, geom_geojson): # option values are valid json result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', f'--clip={json.dumps(geom_geojson)}', f'--tools={json.dumps(geom_geojson)}' ]) @@ -613,10 +612,10 @@ def test_cli_orders_request_cloudconfig(invoke, stac_json): result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', f'--cloudconfig={json.dumps(config_json)}', ]) assert result.exit_code == 0 @@ -640,10 +639,10 @@ def test_cli_orders_request_cloudconfig(invoke, stac_json): def test_cli_orders_request_email(invoke, stac_json): result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', '--email' ]) assert result.exit_code == 0 @@ -671,10 +670,10 @@ def test_cli_orders_request_tools(invoke, geom_geojson, stac_json): result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', f'--tools={json.dumps(tools_json)}' ]) @@ -699,10 +698,10 @@ def test_cli_orders_request_no_stac(invoke): result = invoke([ 'request', - 'PSOrthoTile', - 'analytic', + '--item-type=PSOrthoTile', + '--bundle=analytic', '--name=test', - '--id=4500474_2133707_2021-05-20_2419', + '4500474_2133707_2021-05-20_2419', '--no-stac' ])
Revise `planet orders request` arguments

The arguments for `planet orders request` originally were all flags. @jreiberkyle proposed a change in https://github.com/planetlabs/planet-client-python/pull/616 to go to all positional arguments, and then in #680 we got about halfway there, with positional arguments for item types and bundles but not id or name. @jreiberkyle and I discussed at length and concluded the way forward is a single positional argument of `id`, with the rest as flags. The hope is that we could implement some of the ideas in https://github.com/planetlabs/planet-client-python/issues/523, where the only required argument would be `id`. But even if that doesn't work, a single positional argument feels much better than four, where you'd have to remember the exact order, or zero, where everything is a flag and there is much more to type out. This also seems generally compatible with the other commands in the CLI, most of which have one or two positional arguments, which seems to be a general best practice for CLIs.
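A hedged sketch of the agreed shape: one positional IDS argument, with item type, bundle, and name as options. Option names mirror the patch, but the request body below is a simplification of `planet.order_request.product`, and the comma splitting stands in for the CLI's `CommaSeparatedString` type.

```python
import click


@click.command()
@click.argument('ids')
@click.option('--item-type', required=True, help='Item type for the ids.')
@click.option('--bundle', required=True, help='Product bundle.')
@click.option('--name', required=True, help='Order name.')
def request(ids, item_type, bundle, name):
    """Generate an order request from one positional, comma-separated IDS."""
    click.echo({
        'name': name,
        'products': [{
            'item_ids': ids.split(','),
            'item_type': item_type,
            'product_bundle': bundle,
        }]
    })


if __name__ == '__main__':
    request()
```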
0.0
8d67d237c07606a1fd3f76abb96e518def506b54
[ "tests/integration/test_orders_cli.py::test_cli_orders_request_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", "tests/integration/test_orders_cli.py::test_cli_orders_request_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_request_id_empty", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[geom_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[feature_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_success[featurecollection_geojson]", "tests/integration/test_orders_cli.py::test_cli_orders_request_clip_invalid_geometry", "tests/integration/test_orders_cli.py::test_cli_orders_request_both_clip_and_tools", "tests/integration/test_orders_cli.py::test_cli_orders_request_cloudconfig", "tests/integration/test_orders_cli.py::test_cli_orders_request_email", "tests/integration/test_orders_cli.py::test_cli_orders_request_tools", "tests/integration/test_orders_cli.py::test_cli_orders_request_no_stac" ]
[ "tests/integration/test_orders_cli.py::test_cli_orders_list_basic", "tests/integration/test_orders_cli.py::test_cli_orders_list_empty", "tests/integration/test_orders_cli.py::test_cli_orders_list_state", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[None-100]", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[0-102]", "tests/integration/test_orders_cli.py::test_cli_orders_list_limit[1-1]", "tests/integration/test_orders_cli.py::test_cli_orders_list_pretty", "tests/integration/test_orders_cli.py::test_cli_orders_get", "tests/integration/test_orders_cli.py::test_cli_orders_get_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_cancel", "tests/integration/test_orders_cli.py::test_cli_orders_cancel_id_not_found", "tests/integration/test_orders_cli.py::test_cli_orders_wait_default", "tests/integration/test_orders_cli.py::test_cli_orders_wait_max_attempts", "tests/integration/test_orders_cli.py::test_cli_orders_download_default", "tests/integration/test_orders_cli.py::test_cli_orders_download_checksum", "tests/integration/test_orders_cli.py::test_cli_orders_download_dest", "tests/integration/test_orders_cli.py::test_cli_orders_download_overwrite", "tests/integration/test_orders_cli.py::test_cli_orders_download_state", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]", "tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]", "tests/integration/test_orders_cli.py::test_cli_orders_request_item_type_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_request_product_bundle_invalid", "tests/integration/test_orders_cli.py::test_cli_orders_request_product_bundle_incompatible" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2022-12-01 18:29:44+00:00
apache-2.0
4,595
planetlabs__planet-client-python-816
diff --git a/planet/clients/subscriptions.py b/planet/clients/subscriptions.py index dd884a1..ac9ea69 100644 --- a/planet/clients/subscriptions.py +++ b/planet/clients/subscriptions.py @@ -221,7 +221,6 @@ class SubscriptionsClient: class _ResultsPager(Paged): """Navigates pages of messages about subscription results.""" - NEXT_KEY = '_next' ITEMS_KEY = 'results' params = {'status': [val for val in status or {}]}
planetlabs/planet-client-python
b9dc4aecad57138b54df1e895373d29bbaba4250
diff --git a/tests/integration/test_subscriptions_api.py b/tests/integration/test_subscriptions_api.py index 4658660..44daf66 100644 --- a/tests/integration/test_subscriptions_api.py +++ b/tests/integration/test_subscriptions_api.py @@ -122,7 +122,7 @@ def result_pages(status=None, size=40): pm = datetime.now().isoformat() if len(results) == size: url = f'https://api.planet.com/subscriptions/v1/42/results?pm={pm}' - page['_links']['_next'] = url + page['_links']['next'] = url pages.append(page) return pages
`planet subscriptions results` not returning more than 20 results

**Expected behavior**
`planet subscriptions results <sub-id>` should return 100 results, the default specified in the documentation, or the proper number of results if `--limit` is greater than 20 (it works properly if the limit is less than 20).

**Actual behavior (describe the problem)**
Only 20 results are returned, even if `--limit` is set to be greater than 20. It seems the CLI client is not actually paging through results: it just returns the first 20 and stops. I verified that the HTTP endpoint has more results; it returns 20 on the first page, with a 'next' link to further pages of 20.

**Workaround**
Query the HTTP endpoint directly and follow the 'next' links.

**Minimum, Complete, Viable Code Sample**

```console
> planet subscriptions results <sub-id> --limit 21
```

This should return 21 results; only 20 are.

**Environment Information**
* Operating System Information: macOS-12.2.1-arm64-arm-64bit
* Python version: 3.9.15 (main, Oct 11 2022, 21:39:54) [Clang 14.0.0 (clang-1400.0.29.102)]
* Planet package version: planet, version 2.0a7dev

**Installation Method**
* pip
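The root cause, sketched in isolation: the results pager overrode the next-link key to `'_next'`, but the Subscriptions API names it `'next'`, so the pager never found a second page. Dropping the override (as the patch does) restores the default. The helper below is illustrative, not the SDK's `Paged` class.

```python
from typing import Optional


def next_url(page: dict, next_key: str) -> Optional[str]:
    """Illustrative stand-in for how a pager finds the next page link."""
    return page.get('_links', {}).get(next_key)


page = {
    '_links': {'next': 'https://api.planet.com/subscriptions/v1/42/results'},
    'results': [],
}
assert next_url(page, '_next') is None      # buggy override: paging stops at 20
assert next_url(page, 'next') is not None   # default key: paging continues
```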
0.0
b9dc4aecad57138b54df1e895373d29bbaba4250
[ "tests/integration/test_subscriptions_api.py::test_get_results_success" ]
[ "tests/integration/test_subscriptions_api.py::test_list_subscriptions_failure", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_success[status0-100]", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_success[status1-0]", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_success[None-100]", "tests/integration/test_subscriptions_api.py::test_create_subscription_failure", "tests/integration/test_subscriptions_api.py::test_create_subscription_success", "tests/integration/test_subscriptions_api.py::test_cancel_subscription_failure", "tests/integration/test_subscriptions_api.py::test_cancel_subscription_success", "tests/integration/test_subscriptions_api.py::test_update_subscription_failure", "tests/integration/test_subscriptions_api.py::test_update_subscription_success", "tests/integration/test_subscriptions_api.py::test_get_subscription_failure", "tests/integration/test_subscriptions_api.py::test_get_subscription_success", "tests/integration/test_subscriptions_api.py::test_get_results_failure", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_cycle_break" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2022-12-03 01:32:23+00:00
apache-2.0
4,596
planetlabs__planet-client-python-850
diff --git a/planet/cli/data.py b/planet/cli/data.py index b730609..dc0bc8e 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -352,8 +352,19 @@ async def search_get(ctx, search_id, pretty): echo_json(items, pretty) [email protected]() [email protected]_context +@translate_exceptions +@coro [email protected]('search_id') +async def search_delete(ctx, search_id): + """Delete an existing saved search. + """ + async with data_client(ctx) as cl: + await cl.delete_search(search_id) + + # TODO: search_update()". -# TODO: search_delete()". # TODO: search_run()". # TODO: item_get()". # TODO: asset_activate()".
planetlabs/planet-client-python
6feaf21163f2ca022c387e0344330632a2444de0
diff --git a/tests/integration/test_data_cli.py b/tests/integration/test_data_cli.py index 109497c..881693c 100644 --- a/tests/integration/test_data_cli.py +++ b/tests/integration/test_data_cli.py @@ -53,6 +53,7 @@ def test_data_command_registered(invoke): assert "search" in result.output assert "search-create" in result.output assert "search-get" in result.output + assert "search-delete" in result.output # Add other sub-commands here. @@ -701,9 +702,31 @@ def test_search_get_id_not_found(invoke, search_id): assert 'Error: {"message": "Error message"}\n' == result.output [email protected] +def test_search_delete_success(invoke, search_id, search_result): + delete_url = f'{TEST_SEARCHES_URL}/{search_id}' + mock_resp = httpx.Response(HTTPStatus.NO_CONTENT, json=search_result) + respx.delete(delete_url).return_value = mock_resp + + result = invoke(['search-delete', search_id]) + + assert not result.exception + + [email protected] +def test_search_delete_nonexistant_search_id(invoke, search_id, search_result): + delete_url = f'{TEST_SEARCHES_URL}/{search_id}' + mock_resp = httpx.Response(404, json=search_result) + respx.delete(delete_url).return_value = mock_resp + + result = invoke(['search-delete', search_id]) + + assert result.exception + assert result.exit_code == 1 + + # TODO: basic test for "planet data search-create". # TODO: basic test for "planet data search-update". -# TODO: basic test for "planet data search-delete". # TODO: basic test for "planet data search-get". # TODO: basic test for "planet data search-list". # TODO: basic test for "planet data search-run".
Implement CLI command: planet data search-delete

As specified in https://github.com/planetlabs/planet-client-python/blob/main/design-docs/CLI-Data.md#search-delete
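Expected usage once the command is registered (the `<search_id>` placeholder stands for a saved search's id; this mirrors the test invocation rather than documented output):

```console
planet data search-delete <search_id>
```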
0.0
6feaf21163f2ca022c387e0344330632a2444de0
[ "tests/integration/test_data_cli.py::test_data_command_registered", "tests/integration/test_data_cli.py::test_search_delete_success", "tests/integration/test_data_cli.py::test_search_delete_nonexistant_search_id" ]
[ "tests/integration/test_data_cli.py::test_data_search_command_registered", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr-expected0]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr,ortho_analytic_4b_sr-expected1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr", "tests/integration/test_data_cli.py::test_data_filter_date_range_success", "tests/integration/test_data_cli.py::test_data_filter_date_range_invalid", "tests/integration/test_data_cli.py::test_data_filter_geom[geom_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[feature_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[featurecollection_geojson]", "tests/integration/test_data_cli.py::test_data_filter_number_in_success", "tests/integration/test_data_cli.py::test_data_filter_number_in_badparam", "tests/integration/test_data_cli.py::test_data_filter_range", "tests/integration/test_data_cli.py::test_data_filter_string_in", "tests/integration/test_data_cli.py::test_data_filter_update", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_invalid", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[None-100]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[0-102]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[1-1]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_search_create_daily_email", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{1:1}]", 
"tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene,", "tests/integration/test_data_cli.py::test_search_get", "tests/integration/test_data_cli.py::test_search_get_id_not_found" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2023-02-08 20:24:36+00:00
apache-2.0
4,597
planetlabs__planet-client-python-852
diff --git a/planet/cli/data.py b/planet/cli/data.py index dc0bc8e..e4fa562 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -364,7 +364,41 @@ async def search_delete(ctx, search_id): await cl.delete_search(search_id) -# TODO: search_update()". [email protected]() [email protected]_context +@translate_exceptions +@coro [email protected]('search_id') [email protected]('name') [email protected]("item_types", + type=types.CommaSeparatedString(), + callback=check_item_types) [email protected]('filter', type=types.JSON()) [email protected]('--daily-email', + is_flag=True, + help='Send a daily email when new results are added.') +@pretty +async def search_update(ctx, + search_id, + name, + item_types, + filter, + daily_email, + pretty): + """Update a saved search with the given search request. + + This function outputs a full JSON description of the updated search, + optionally pretty-printed. + """ + async with data_client(ctx) as cl: + items = await cl.update_search(search_id, + name, + item_types, + filter, + daily_email) + echo_json(items, pretty) + + # TODO: search_run()". # TODO: item_get()". # TODO: asset_activate()".
planetlabs/planet-client-python
082d117db8adfc85881508e871bdafce1f6cbbbb
diff --git a/tests/integration/test_data_cli.py b/tests/integration/test_data_cli.py index 881693c..223f3b3 100644 --- a/tests/integration/test_data_cli.py +++ b/tests/integration/test_data_cli.py @@ -54,6 +54,7 @@ def test_data_command_registered(invoke): assert "search-create" in result.output assert "search-get" in result.output assert "search-delete" in result.output + assert "search-update" in result.output # Add other sub-commands here. @@ -87,6 +88,12 @@ def default_filters(): return [PERMISSION_FILTER, STD_QUALITY_FILTER] [email protected] +def search_filter(get_test_file_json): + filename = 'data_search_filter_2022-01.json' + return get_test_file_json(filename) + + @pytest.fixture def assert_and_filters_equal(): """Check for equality when the order of the config list doesn't matter""" @@ -725,8 +732,54 @@ def test_search_delete_nonexistant_search_id(invoke, search_id, search_result): assert result.exit_code == 1 [email protected]("item_types", + ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) [email protected] +def test_search_update_success(invoke, + search_id, + search_result, + item_types, + search_filter): + update_url = f'{TEST_SEARCHES_URL}/{search_id}' + mock_resp = httpx.Response(HTTPStatus.OK, json=search_result) + respx.put(update_url).return_value = mock_resp + + name = "search_name" + + result = invoke([ + 'search-update', + search_id, + name, + item_types, + json.dumps(search_filter) + ]) + + assert not result.exception + + [email protected] +def test_search_update_fail(invoke, search_id, search_filter): + update_url = f'{TEST_SEARCHES_URL}/{search_id}' + error_json = {"message": "Error message"} + mock_resp = httpx.Response(404, json=error_json) + respx.put(update_url).return_value = mock_resp + + name = "search_name" + item_types = "PSScene" + + result = invoke([ + 'search-update', + search_id, + name, + item_types, + json.dumps(search_filter) + ]) + + assert result.output.startswith("Error") + assert result.exception + + # TODO: basic test for "planet data search-create". -# TODO: basic test for "planet data search-update". # TODO: basic test for "planet data search-get". # TODO: basic test for "planet data search-list". # TODO: basic test for "planet data search-run".
Implement CLI command: planet data search-update

As specified in https://github.com/planetlabs/planet-client-python/blob/main/design-docs/CLI-Data.md#search-update
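The CLI handler in the patch above delegates to `DataClient.update_search`. A minimal Python-side usage sketch, assuming the positional argument order the handler uses (`search_id`, `name`, `item_types`, `search_filter`, `enable_email`); the search id, name, and filter below are placeholders:

```python
import asyncio

from planet import DataClient, Session


async def main():
    async with Session() as session:
        client = DataClient(session)
        # Placeholder values for illustration only.
        updated = await client.update_search(
            '286469f0b27c476e96c3c4e561f59664',  # id of an existing saved search
            'my_updated_search',                 # new search name
            ['PSScene'],                         # item types
            {'type': 'PermissionFilter', 'config': ['assets:download']},
        )
        print(updated)


asyncio.run(main())
```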
0.0
082d117db8adfc85881508e871bdafce1f6cbbbb
[ "tests/integration/test_data_cli.py::test_data_command_registered", "tests/integration/test_data_cli.py::test_search_update_success[PSScene]", "tests/integration/test_data_cli.py::test_search_update_success[SkySatScene]", "tests/integration/test_data_cli.py::test_search_update_success[PSScene,", "tests/integration/test_data_cli.py::test_search_update_fail" ]
[ "tests/integration/test_data_cli.py::test_data_search_command_registered", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[None-None---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1-None-None]", "tests/integration/test_data_cli.py::test_data_filter_defaults[--std-quality=False-s_remove1---permission=False-p_remove1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr-expected0]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr,ortho_analytic_4b_sr-expected1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr", "tests/integration/test_data_cli.py::test_data_filter_date_range_success", "tests/integration/test_data_cli.py::test_data_filter_date_range_invalid", "tests/integration/test_data_cli.py::test_data_filter_geom[geom_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[feature_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[featurecollection_geojson]", "tests/integration/test_data_cli.py::test_data_filter_number_in_success", "tests/integration/test_data_cli.py::test_data_filter_number_in_badparam", "tests/integration/test_data_cli.py::test_data_filter_range", "tests/integration/test_data_cli.py::test_data_filter_string_in", "tests/integration/test_data_cli.py::test_data_filter_update", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_invalid", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[None-100]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[0-102]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[1-1]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_search_create_daily_email", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{1:1}]", 
"tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene,", "tests/integration/test_data_cli.py::test_search_get", "tests/integration/test_data_cli.py::test_search_get_id_not_found", "tests/integration/test_data_cli.py::test_search_delete_success", "tests/integration/test_data_cli.py::test_search_delete_nonexistant_search_id" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
2023-02-14 22:10:42+00:00
apache-2.0
4,598
planetlabs__planet-client-python-858
diff --git a/CHANGES.txt b/CHANGES.txt index 0e5970a..fb8f864 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,9 @@ +2.0b2 (TBD) + +Added: +- The Session class can now construct clients by name with its client method + (#858). + 2.0.0-beta.1 (2022-12-07) Changed: diff --git a/docs/get-started/upgrading.md b/docs/get-started/upgrading.md index 7edf21f..a82fafa 100644 --- a/docs/get-started/upgrading.md +++ b/docs/get-started/upgrading.md @@ -24,7 +24,7 @@ The best way of doing this is wrapping any code that invokes a client class in a ```python async with Session() as session: - client = OrdersClient(session) + client = session.client('orders') result = await client.create_order(order) # Process result ``` @@ -40,12 +40,12 @@ In V2, all `*Client` methods (for example, `DataClient().search`, `OrderClient() ```python import asyncio from datetime import datetime -from planet import Session, DataClient +from planet import Session from planet import data_filter as filters async def do_search(): async with Session() as session: - client = DataClient(session) + client = session.client('data') date_filter = filters.date_range_filter('acquired', gte=datetime.fromisoformat("2022-11-18"), lte=datetime.fromisoformat("2022-11-21")) cloud_filter = filters.range_filter('cloud_cover', lte=0.1) download_filter = filters.permission_filter() @@ -74,11 +74,11 @@ Is now ```python async with Session() as session: - items = [i async for i in planet.DataClient(session).search(["PSScene"], all_filters)] + items = [i async for i in session.client('data').search(["PSScene"], all_filters)] ``` ## Orders API -The Orders API capabilities in V1 were quite primitive, but those that did exist have been retained in much the same form; `ClientV1().create_order` becomes `OrderClient(session).create_order`. (As with the `DataClient`, you must also use `async` and `Session` with `OrderClient`.) +The Orders API capabilities in V1 were quite primitive, but those that did exist have been retained in much the same form; `ClientV1().create_order` becomes `OrdersClient(session).create_order`. (As with the `DataClient`, you must also use `async` and `Session` with `OrdersClient`.) Additionally, there is now also an order builder in `planet.order_request`, similar to the preexisting search filter builder. For more details on this, refer to the [Creating an Order](../../python/sdk-guide/#creating-an-order). 
diff --git a/docs/python/sdk-guide.md b/docs/python/sdk-guide.md index 4e1ac2c..ba7a0e0 100644 --- a/docs/python/sdk-guide.md +++ b/docs/python/sdk-guide.md @@ -116,7 +116,7 @@ from planet import OrdersClient async def main(): async with Session() as sess: - client = OrdersClient(sess) + client = sess.client('orders') # perform operations here asyncio.run(main()) @@ -198,7 +198,7 @@ the context of a `Session` with the `OrdersClient`: ```python async def main(): async with Session() as sess: - cl = OrdersClient(sess) + cl = sess.client('orders') order = await cl.create_order(request) asyncio.run(main()) @@ -222,7 +222,7 @@ from planet import reporting async def create_wait_and_download(): async with Session() as sess: - cl = OrdersClient(sess) + cl = sess.client('orders') with reporting.StateBar(state='creating') as bar: # create order order = await cl.create_order(request) @@ -272,7 +272,7 @@ from planet import collect, OrdersClient, Session async def main(): async with Session() as sess: - client = OrdersClient(sess) + client = sess.client('orders') orders_list = collect(client.list_orders()) asyncio.run(main()) @@ -297,7 +297,7 @@ from planet import DataClient async def main(): async with Session() as sess: - client = DataClient(sess) + client = sess.client('data') # perform operations here asyncio.run(main()) @@ -344,7 +344,7 @@ the context of a `Session` with the `DataClient`: ```python async def main(): async with Session() as sess: - cl = DataClient(sess) + cl = sess.client('data') items = [i async for i in cl.search(['PSScene'], sfilter)] asyncio.run(main()) @@ -364,7 +364,7 @@ print command to report wait status. `download_asset` has reporting built in. ```python async def download_and_validate(): async with Session() as sess: - cl = DataClient(sess) + cl = sess.client('data') # get asset description item_type_id = 'PSScene' diff --git a/planet/__init__.py b/planet/__init__.py index 6f081a6..fcaf682 100644 --- a/planet/__init__.py +++ b/planet/__init__.py @@ -16,7 +16,7 @@ from .http import Session from . import order_request, reporting from .__version__ import __version__ # NOQA from .auth import Auth -from .clients import DataClient, OrdersClient # NOQA +from .clients import DataClient, OrdersClient, SubscriptionsClient # NOQA from .io import collect __all__ = [ @@ -24,6 +24,7 @@ __all__ = [ 'collect', 'DataClient' 'OrdersClient', + 'SubscriptionsClient', 'order_request', 'reporting', 'Session', diff --git a/planet/clients/__init__.py b/planet/clients/__init__.py index 9fb246c..138726a 100644 --- a/planet/clients/__init__.py +++ b/planet/clients/__init__.py @@ -12,10 +12,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from .data import DataClient from .orders import OrdersClient +from .subscriptions import SubscriptionsClient __all__ = [ 'DataClient', 'OrdersClient', + 'SubscriptionsClient', ] + +# Organize client classes by their module name to allow concise lookup. +_client_directory = { + 'data': DataClient, + 'orders': OrdersClient, + 'subscriptions': SubscriptionsClient +} diff --git a/planet/http.py b/planet/http.py index 97983c0..0f649ea 100644 --- a/planet/http.py +++ b/planet/http.py @@ -22,7 +22,9 @@ import logging import random import time from typing import AsyncGenerator, Optional + import httpx +from typing_extensions import Literal from .auth import Auth, AuthType from . 
import exceptions, models @@ -413,6 +415,29 @@ class Session(BaseSession): finally: await response.aclose() + def client(self, + name: Literal['data', 'orders', 'subscriptions'], + base_url: Optional[str] = None) -> object: + """Get a client by its module name. + + Parameters: + name: one of 'data', 'orders', or 'subscriptions'. + + Returns: + A client instance. + + Raises: + ClientError when no such client can be had. + + """ + # To avoid circular dependency. + from planet.clients import _client_directory + + try: + return _client_directory[name](self, base_url=base_url) + except KeyError: + raise exceptions.ClientError("No such client.") + class AuthSession(BaseSession): """Synchronous connection to the Planet Auth service.""" diff --git a/setup.py b/setup.py index 1255154..85b94f9 100644 --- a/setup.py +++ b/setup.py @@ -30,6 +30,7 @@ install_requires = [ 'jsonschema', 'pyjwt>=2.1', 'tqdm>=4.56', + 'typing-extensions', ] test_requires = ['pytest', 'pytest-asyncio==0.16', 'pytest-cov', 'respx==0.19']
planetlabs/planet-client-python
7c4d4d1fcf087954fa4de806da10fd2cc153289f
diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py new file mode 100644 index 0000000..bf5baca --- /dev/null +++ b/tests/unit/test_session.py @@ -0,0 +1,23 @@ +"""Session module tests.""" + +import pytest + +from planet import DataClient, OrdersClient, SubscriptionsClient, Session +from planet.exceptions import ClientError + + [email protected]("client_name,client_class", + [('data', DataClient), ('orders', OrdersClient), + ('subscriptions', SubscriptionsClient)]) +def test_session_get_client(client_name, client_class): + """Get a client from a session.""" + session = Session() + client = session.client(client_name) + assert isinstance(client, client_class) + + +def test_session_get_client_error(): + """Get an exception when no such client exists.""" + session = Session() + with pytest.raises(ClientError): + _ = session.client('bogus')
Add a method to get a client from a Session instance

As in AWS boto3: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/session.html#custom-session. For planet, this would look like

```python
session = planet.Session(...)
client = session.client("data")
```

Currently, we need to do the following.

```python
session = planet.Session(...)
client = DataClient(session)
```

There's nothing wrong with this kind of dependency injection, and we're going to keep this usage. But what I'm proposing is handier and easier to remember.
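The merged change implements this as a synchronous `Session.client(name)` that looks the class up by module name ('data', 'orders', or 'subscriptions') and raises `planet.exceptions.ClientError` for anything else. A usage sketch based on the patch and its unit tests:

```python
import asyncio

from planet import DataClient, Session
from planet.exceptions import ClientError


async def main():
    async with Session() as session:
        data = session.client('data')           # a DataClient
        orders = session.client('orders')       # an OrdersClient
        subs = session.client('subscriptions')  # a SubscriptionsClient
        assert isinstance(data, DataClient)

        try:
            session.client('bogus')
        except ClientError:
            print('No such client.')


asyncio.run(main())
```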
0.0
7c4d4d1fcf087954fa4de806da10fd2cc153289f
[ "tests/unit/test_session.py::test_session_get_client[data-DataClient]", "tests/unit/test_session.py::test_session_get_client[orders-OrdersClient]", "tests/unit/test_session.py::test_session_get_client[subscriptions-SubscriptionsClient]", "tests/unit/test_session.py::test_session_get_client_error" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-02-26 23:59:55+00:00
apache-2.0
4,599
planetlabs__planet-client-python-859
diff --git a/design-docs/CLI-Subscriptions.md b/design-docs/CLI-Subscriptions.md index 043b858..599a740 100644 --- a/design-docs/CLI-Subscriptions.md +++ b/design-docs/CLI-Subscriptions.md @@ -184,7 +184,7 @@ planet subscriptions request [OPTIONS] Generate a subscriptions request. -This command provides support for building the subscription request JSON used to create or +This command provides support for building the subscription request JSON used to create or update a subscription. It outputs the subscription request. Options: @@ -207,7 +207,10 @@ Options: ### Usage Examples ``` -planet subscription request --source source.json --clip geom.json --delivery delivery.json | planet subscriptions create - +planet subscription request \ + --name test \ + --source source.json \ + --delivery delivery.json | planet subscriptions create - ``` ## Request-catalog @@ -222,13 +225,13 @@ Generate a subscriptions request source JSON for a catalog. Options: --asset-types TEXT One or more comma-separated asset types. Required. --item-types TEXT One or more comma-separated item-types. Required. - --geom JSON geometry of the area of interest of the subscription that will be used to determine matches. + --geometry JSON geometry of the area of interest of the subscription that will be used to determine matches. Can be a json string, filename, or '-' for stdin. --start-time DATETIME Start date and time to begin subscription. --end-time DATETIME Date and time to end the subscription. - --rrule TEXT iCalendar recurrance rule to specify recurrances. - --filter JSON A search filter can be specified a json string, - filename, or '-' for stdin. + --rrule TEXT iCalendar recurrance rule to specify recurrances. + --filter JSON A search filter can be specified a json string, + filename, or '-' for stdin. --pretty Format JSON output. --help Show this message and exit. ``` @@ -236,13 +239,12 @@ Options: ### Usage Examples ``` -planet subscriptions request \ - --source $(planet subscriptions request-catalog - --item-type PSScene - --asset-types ortho_analytic_8b_sr,ortho_udm2 - --geom aoi.json - --start-time 05/01/2022) - --delivery delivery.json | planet subscriptions create - +planet subscriptions request-catalog \ + --item-types PSScene \ + --asset-types ortho_analytic_8b_sr,ortho_udm2 \ + --geometry aoi.geojson \ + --start-time 2022-01-01) \ + --delivery delivery.json > source.json ``` ## Request-other @@ -257,7 +259,7 @@ Generate a subscriptions request source JSON for another product. Options: --type Type. --id Id. - --geom JSON geometry of the area of interest of the subscription that will be used to determine matches. + --geometry JSON geometry of the area of interest of the subscription that will be used to determine matches. Can be a json string, filename, or '-' for stdin. --start-time DATETIME Start date and time to begin subscription. --end-time DATETIME Date and time to end the subscription. @@ -266,18 +268,6 @@ Options: --help Show this message and exit. 
``` -### Usage Examples - -``` -planet subscriptions request \ - --source $(planet subscriptions request-other - --type othertype - --id otherid - --geom aoi.json - --start-time 05/01/2022) - --delivery delivery.json | planet subscriptions create - -``` - ## Update diff --git a/planet/cli/subscriptions.py b/planet/cli/subscriptions.py index 8cc508a..7e0b21d 100644 --- a/planet/cli/subscriptions.py +++ b/planet/cli/subscriptions.py @@ -9,6 +9,7 @@ from .io import echo_json from .options import limit, pretty from .session import CliSession from planet.clients.subscriptions import SubscriptionsClient +from .. import subscription_request @asynccontextmanager @@ -157,3 +158,84 @@ async def list_subscription_results_cmd(ctx, status=status, limit=limit): echo_json(result, pretty) + + [email protected]() [email protected]('--name', + required=True, + type=str, + help='Subscription name. Does not need to be unique.') [email protected]('--source', + required=True, + type=types.JSON(), + help='Source JSON. Can be a string, filename, or - for stdin.') [email protected]('--delivery', + required=True, + type=types.JSON(), + help='Delivery JSON. Can be a string, filename, or - for stdin.') [email protected]( + '--notifications', + type=types.JSON(), + help='Notifications JSON. Can be a string, filename, or - for stdin.') [email protected]('--tools', + type=types.JSON(), + help='Toolchain JSON. Can be a string, filename, or - for stdin.' + ) +@pretty +def request(name, source, delivery, notifications, tools, pretty): + """Generate a subscriptions request.""" + res = subscription_request.build_request(name, + source, + delivery, + notifications=notifications, + tools=tools) + echo_json(res, pretty) + + [email protected]() [email protected]('--item-types', + required=True, + type=types.CommaSeparatedString(), + help='One or more comma-separated item types.') [email protected]('--asset-types', + required=True, + type=types.CommaSeparatedString(), + help='One or more comma-separated asset types.') [email protected]( + '--geometry', + required=True, + type=types.JSON(), + help="""Geometry of the area of interest of the subscription that will be + used to determine matches. Can be a string, filename, or - for stdin.""") [email protected]('--start-time', + required=True, + type=types.DateTime(), + help='Date and time to begin subscription.') [email protected]('--end-time', + type=types.DateTime(), + help='Date and time to end subscription.') [email protected]('--rrule', + type=str, + help='iCalendar recurrance rule to specify recurrances.') [email protected]('--filter', + type=types.JSON(), + help='Search filter. Can be a string, filename, or - for stdin.' + ) +@pretty +def request_catalog(item_types, + asset_types, + geometry, + start_time, + end_time, + rrule, + filter, + pretty): + """Generate a subscriptions request catalog source description.""" + res = subscription_request.catalog_source(item_types, + asset_types, + geometry, + start_time, + end_time=end_time, + rrule=rrule, + filter=filter) + echo_json(res, pretty)
planetlabs/planet-client-python
e90f3e27e0ce4b9b357566214f866b18a6644772
diff --git a/tests/integration/test_subscriptions_cli.py b/tests/integration/test_subscriptions_cli.py index 6070d21..edaa3c2 100644 --- a/tests/integration/test_subscriptions_cli.py +++ b/tests/integration/test_subscriptions_cli.py @@ -12,7 +12,6 @@ There are 6 subscriptions commands: TODO: tests for 3 options of the planet-subscriptions-results command. """ - import json from click.testing import CliRunner @@ -250,3 +249,60 @@ def test_subscriptions_results_success(invoke, options, expected_count): assert result.exit_code == 0 # success. assert result.output.count('"id"') == expected_count + + +def test_request_base_success(invoke, geom_geojson): + """Request command succeeds""" + source = json.dumps({ + "type": "catalog", + "parameters": { + "geometry": geom_geojson, + "start_time": "2021-03-01T00:00:00Z", + "end_time": "2023-11-01T00:00:00Z", + "rrule": "FREQ=MONTHLY;BYMONTH=3,4,5,6,7,8,9,10", + "item_types": ["PSScene"], + "asset_types": ["ortho_analytic_4b"] + } + }) + delivery = json.dumps({ + "type": "amazon_s3", + "parameters": { + "aws_access_key_id": "keyid", + "aws_secret_access_key": "accesskey", + "bucket": "bucket", + "aws_region": "region" + } + }) + + result = invoke([ + 'request', + '--name=test', + f'--source={source}', + f'--delivery={delivery}' + ]) + + assert source in result.output + assert result.exit_code == 0 # success. + + +def test_request_catalog_success(invoke, geom_geojson): + """Request-catalog command succeeds""" + source = { + "type": "catalog", + "parameters": { + "geometry": geom_geojson, + "start_time": "2021-03-01T00:00:00Z", + "item_types": ["PSScene"], + "asset_types": ["ortho_analytic_4b"] + } + } + + result = invoke([ + 'request-catalog', + '--item-types=PSScene', + '--asset-types=ortho_analytic_4b', + f"--geometry={json.dumps(geom_geojson)}", + '--start-time=2021-03-01T00:00:00' + ]) + assert json.loads(result.output) == source + assert result.exit_code == 0 # success.
Helper methods for subscriptions

For the MVP of subscriptions we just have users directly form the entire JSON request, so we could ship something and iterate. Now that it's out, we should consider what we want to do to help users: have one or more options like `planet data filter` or `planet orders request`, so that a user can construct a decent subscription without having to get into JSON.

When I have some time I'll map out some proposals, but just opening this to make sure we track and prioritize. #613 and #601 are related - ideas on generating some of the JSON parts.
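The patch answers this with two new commands, `planet subscriptions request` and `planet subscriptions request-catalog`, both thin wrappers over the `planet.subscription_request` helpers. A sketch of the Python-side calls the CLI makes; the geometry and the delivery credentials are placeholders:

```python
from datetime import datetime

from planet import subscription_request

# Placeholder area of interest (any GeoJSON geometry).
geom = {
    'type': 'Polygon',
    'coordinates': [[[37.79, 14.57], [37.81, 14.57], [37.81, 14.59],
                     [37.79, 14.59], [37.79, 14.57]]],
}

# Catalog source block, as exercised by the new unit tests.
source = subscription_request.catalog_source(
    item_types=['PSScene'],
    asset_types=['ortho_analytic_4b'],
    geometry=geom,
    start_time=datetime(2021, 3, 1),
)

# Delivery block following the Subscriptions API schema; all credential
# values here are fake.
delivery = {
    'type': 'amazon_s3',
    'parameters': {
        'aws_access_key_id': 'keyid',
        'aws_secret_access_key': 'accesskey',
        'bucket': 'bucket',
        'aws_region': 'region',
    },
}

# Full subscription description, ready to pass to the create endpoint.
request = subscription_request.build_request('test', source, delivery)
print(request)
```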
0.0
e90f3e27e0ce4b9b357566214f866b18a6644772
[ "tests/integration/test_subscriptions_cli.py::test_request_base_success", "tests/integration/test_subscriptions_cli.py::test_request_catalog_success" ]
[ "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options4-0]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[--{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[--{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_describe_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_describe_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options4-0]" ]
{ "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-02-27 21:44:19+00:00
apache-2.0
4,600
planetlabs__planet-client-python-880
diff --git a/planet/subscription_request.py b/planet/subscription_request.py index 26b761e..212bdc7 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -133,7 +133,7 @@ def catalog_source( parameters = { "item_types": item_types, "asset_types": asset_types, - "geometry": geometry, + "geometry": geojson.as_geom(geometry), } try:
planetlabs/planet-client-python
60a29560cdacaed036793700a153e710973b1d78
diff --git a/tests/unit/test_subscription_request.py b/tests/unit/test_subscription_request.py index 51e7590..7c0bca2 100644 --- a/tests/unit/test_subscription_request.py +++ b/tests/unit/test_subscription_request.py @@ -90,6 +90,30 @@ def test_catalog_source_success(geom_geojson): assert res == expected +def test_catalog_source_featurecollection(featurecollection_geojson, + geom_geojson): + '''geojson specified as featurecollection is simplified down to just + the geometry''' + res = subscription_request.catalog_source( + item_types=["PSScene"], + asset_types=["ortho_analytic_4b"], + geometry=featurecollection_geojson, + start_time=datetime(2021, 3, 1), + ) + + expected = { + "type": "catalog", + "parameters": { + "geometry": geom_geojson, + "start_time": "2021-03-01T00:00:00Z", + "item_types": ["PSScene"], + "asset_types": ["ortho_analytic_4b"] + } + } + + assert res == expected + + def test_catalog_source_invalid_start_time(geom_geojson): with pytest.raises(exceptions.ClientError): subscription_request.catalog_source(
invalid geometry entry created by planet subscriptions request-catalog

**Expected behavior**

Geometry entry in source.json produced by this command should be just the geometry part of `aoi.geojson`, so `"type": "Polygon"`.

```bash
planet subscriptions request-catalog \
  --item-types PSScene \
  --asset-types ortho_analytic_8b_sr,ortho_udm2 \
  --geometry aoi.geojson \
  --start-time 2022-01-01 > source.json
```

aoi.geojson:

```
{
  "type": "FeatureCollection",
  "features": [
    {
      "type": "Feature",
      "properties": {},
      "geometry": {
        "type": "Polygon",
        "coordinates": [
          [
            [7.05322265625, 46.81509864599243],
            [7.580566406250001, 46.81509864599243],
            [7.580566406250001, 47.17477833929903],
            [7.05322265625, 47.17477833929903],
            [7.05322265625, 46.81509864599243]
          ]
        ]
      }
    }
  ]
}
```

**Actual behavior (describe the problem)**

source.json includes the entire FeatureCollection.

source.json:

```
{
  "type": "catalog",
  "parameters": {
    "item_types": ["PSScene"],
    "asset_types": ["ortho_analytic_8b_sr", "ortho_udm2"],
    "geometry": {
      "type": "FeatureCollection",
      "features": [
        {
          "type": "Feature",
          "properties": {},
          "geometry": {
            "type": "Polygon",
            "coordinates": [
              [
                [7.05322265625, 46.81509864599243],
                [7.580566406250001, 46.81509864599243],
                [7.580566406250001, 47.17477833929903],
                [7.05322265625, 47.17477833929903],
                [7.05322265625, 46.81509864599243]
              ]
            ]
          }
        }
      ]
    },
    "start_time": "2022-01-01T00:00:00Z"
  }
}
```

**Ideas**

This could probably be fixed by applying `geojson.to_geometry()` somewhere in the processing chain.
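For reference, the one-line fix in the patch above normalizes the input with `geojson.as_geom` rather than a `to_geometry()` helper. A minimal sketch of the intended behavior, assuming `planet.geojson.as_geom` is importable as shown and returns the bare geometry for any GeoJSON input:

```python
from planet import geojson

# The FeatureCollection from the report above.
feature_collection = {
    'type': 'FeatureCollection',
    'features': [{
        'type': 'Feature',
        'properties': {},
        'geometry': {
            'type': 'Polygon',
            'coordinates': [[[7.05322265625, 46.81509864599243],
                             [7.580566406250001, 46.81509864599243],
                             [7.580566406250001, 47.17477833929903],
                             [7.05322265625, 47.17477833929903],
                             [7.05322265625, 46.81509864599243]]],
        },
    }],
}

# Only the geometry should survive the normalization.
geom = geojson.as_geom(feature_collection)
assert geom['type'] == 'Polygon'
```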
0.0
60a29560cdacaed036793700a153e710973b1d78
[ "tests/unit/test_subscription_request.py::test_catalog_source_featurecollection" ]
[ "tests/unit/test_subscription_request.py::test_build_request_success", "tests/unit/test_subscription_request.py::test_catalog_source_success", "tests/unit/test_subscription_request.py::test_catalog_source_invalid_start_time", "tests/unit/test_subscription_request.py::test_amazon_s3_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_success", "tests/unit/test_subscription_request.py::test_notifications_success", "tests/unit/test_subscription_request.py::test_notifications_invalid_topics", "tests/unit/test_subscription_request.py::test_band_math_tool_success", "tests/unit/test_subscription_request.py::test_band_math_tool_invalid_pixel_type", "tests/unit/test_subscription_request.py::test_clip_tool_success", "tests/unit/test_subscription_request.py::test_clip_tool_invalid_type", "tests/unit/test_subscription_request.py::test_file_format_tool_success", "tests/unit/test_subscription_request.py::test_file_format_tool_invalid_format", "tests/unit/test_subscription_request.py::test_harmonize_tool_success", "tests/unit/test_subscription_request.py::test_harmonize_tool_invalid_target_sensor", "tests/unit/test_subscription_request.py::test_reproject_tool_success", "tests/unit/test_subscription_request.py::test_reproject_tool_invalid_kernel", "tests/unit/test_subscription_request.py::test_toar_tool_success" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-03-21 18:10:30+00:00
apache-2.0
4,601
planetlabs__planet-client-python-883
diff --git a/CHANGES.txt b/CHANGES.txt index 6ec63b9..4963823 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,9 @@ +2.0.0-rc.3 (TBD) + +Changed: +- Session class unit tests are marked to be run within an event loop and are + no longer skipped (#881). + 2.0.0-rc.2 (2023-03-15) Added: diff --git a/planet/cli/data.py b/planet/cli/data.py index cbca75e..46a9603 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -283,7 +283,8 @@ def filter(ctx, callback=check_item_types) @click.option('--filter', type=types.JSON(), - help='Apply specified filter to search.') + help="""Apply specified filter to search. Can be a json string, + filename, or '-' for stdin.""") @limit @click.option('--name', type=str, help='Name of the saved search.') @click.option('--sort', @@ -321,32 +322,35 @@ async def search(ctx, item_types, filter, limit, name, sort, pretty): @click.pass_context @translate_exceptions @coro [email protected]('name') @click.argument("item_types", type=types.CommaSeparatedString(), callback=check_item_types) [email protected]("filter", type=types.JSON()) [email protected]( + '--filter', + type=types.JSON(), + required=True, + help="""Filter to apply to search. Can be a json string, filename, + or '-' for stdin.""") [email protected]('--name', + type=str, + required=True, + help='Name of the saved search.') @click.option('--daily-email', is_flag=True, help='Send a daily email when new results are added.') @pretty -async def search_create(ctx, name, item_types, filter, daily_email, pretty): +async def search_create(ctx, item_types, filter, name, daily_email, pretty): """Create a new saved structured item search. This function outputs a full JSON description of the created search, optionally pretty-printed. - NAME is the name to give the search. - ITEM_TYPES is a comma-separated list of item-types to search. - - FILTER must be JSON and can be specified a json string, filename, or '-' - for stdin. """ async with data_client(ctx) as cl: - items = await cl.create_search(name=name, - item_types=item_types, + items = await cl.create_search(item_types=item_types, search_filter=filter, + name=name, enable_email=daily_email) echo_json(items, pretty) diff --git a/planet/clients/data.py b/planet/clients/data.py index 82c22c2..2e80dd0 100644 --- a/planet/clients/data.py +++ b/planet/clients/data.py @@ -167,9 +167,9 @@ class DataClient: yield i async def create_search(self, - name: str, item_types: List[str], search_filter: dict, + name: str, enable_email: bool = False) -> dict: """Create a new saved structured item search. @@ -190,9 +190,9 @@ class DataClient: Parameters: - name: The name of the saved search. item_types: The item types to include in the search. search_filter: Structured search criteria. + name: The name of the saved search. enable_email: Send a daily email when new results are added. Returns: diff --git a/planet/subscription_request.py b/planet/subscription_request.py index 26b761e..212bdc7 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -133,7 +133,7 @@ def catalog_source( parameters = { "item_types": item_types, "asset_types": asset_types, - "geometry": geometry, + "geometry": geojson.as_geom(geometry), } try:
planetlabs/planet-client-python
60a29560cdacaed036793700a153e710973b1d78
diff --git a/tests/integration/test_data_api.py b/tests/integration/test_data_api.py index 736d751..8a2c500 100644 --- a/tests/integration/test_data_api.py +++ b/tests/integration/test_data_api.py @@ -245,7 +245,9 @@ async def test_create_search_basic(search_filter, session): respx.post(TEST_SEARCHES_URL).return_value = mock_resp cl = DataClient(session, base_url=TEST_URL) - search = await cl.create_search('test', ['PSScene'], search_filter) + search = await cl.create_search(['PSScene'], + search_filter=search_filter, + name='test') # check that request is correct expected_request = { @@ -281,8 +283,9 @@ async def test_create_search_email(search_filter, session): respx.post(TEST_SEARCHES_URL).return_value = mock_resp cl = DataClient(session, base_url=TEST_URL) - search = await cl.create_search('test', ['PSScene'], - search_filter, + search = await cl.create_search(['PSScene'], + search_filter=search_filter, + name='test', enable_email=True) # check that request is correct diff --git a/tests/integration/test_data_cli.py b/tests/integration/test_data_cli.py index 6150d25..47265bd 100644 --- a/tests/integration/test_data_cli.py +++ b/tests/integration/test_data_cli.py @@ -572,7 +572,13 @@ def test_data_search_create_filter_invalid_json(invoke, item_types, filter): name = "temp" runner = CliRunner() - result = invoke(["search-create", name, item_types, filter], runner=runner) + result = invoke([ + "search-create", + item_types, + f'--name={name}', + f'--filter={json.dumps(filter)}' + ], + runner=runner) assert result.exit_code == 2 @@ -599,7 +605,12 @@ def test_data_search_create_filter_success(invoke, item_types): respx.post(TEST_SEARCHES_URL).return_value = mock_resp runner = CliRunner() - result = invoke(["search-create", name, item_types, json.dumps(filter)], + result = invoke([ + "search-create", + item_types, + f'--filter={json.dumps(filter)}', + f'--name={name}' + ], runner=runner) assert result.exit_code == 0 @@ -621,9 +632,9 @@ def test_data_search_create_daily_email(invoke, search_result): result = invoke([ 'search-create', - 'temp', 'SkySatScene', - json.dumps(filter), + '--name=temp', + f'--filter={json.dumps(filter)}', '--daily-email' ]) diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 5520dfd..ed631bd 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -6,6 +6,7 @@ from planet import DataClient, OrdersClient, SubscriptionsClient, Session from planet.exceptions import ClientError [email protected] @pytest.mark.parametrize("client_name,client_class", [('data', DataClient), ('orders', OrdersClient), ('subscriptions', SubscriptionsClient)]) @@ -16,6 +17,7 @@ async def test_session_get_client(client_name, client_class): assert isinstance(client, client_class) [email protected] async def test_session_get_client_error(): """Get an exception when no such client exists.""" async with Session() as session: diff --git a/tests/unit/test_subscription_request.py b/tests/unit/test_subscription_request.py index 51e7590..7c0bca2 100644 --- a/tests/unit/test_subscription_request.py +++ b/tests/unit/test_subscription_request.py @@ -90,6 +90,30 @@ def test_catalog_source_success(geom_geojson): assert res == expected +def test_catalog_source_featurecollection(featurecollection_geojson, + geom_geojson): + '''geojson specified as featurecollection is simplified down to just + the geometry''' + res = subscription_request.catalog_source( + item_types=["PSScene"], + asset_types=["ortho_analytic_4b"], + geometry=featurecollection_geojson, + 
start_time=datetime(2021, 3, 1), + ) + + expected = { + "type": "catalog", + "parameters": { + "geometry": geom_geojson, + "start_time": "2021-03-01T00:00:00Z", + "item_types": ["PSScene"], + "asset_types": ["ortho_analytic_4b"] + } + } + + assert res == expected + + def test_catalog_source_invalid_start_time(geom_geojson): with pytest.raises(exceptions.ClientError): subscription_request.catalog_source(
Update `search-create` CLI and Python API commands to match `search`

**Expected behavior**

UI of `planet data search-create` and `planet data search` should be similar.

**Actual behavior (describe the problem)**

`planet data search` has filter and name as options; `planet data search-create` has name and filter as arguments.

**Proposed Solution**

Update the `search-create` CLI and Python API commands to match `search`.
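A usage sketch of the reordered `DataClient.create_search` signature, mirroring the updated tests (`create_search(item_types, search_filter=..., name=..., enable_email=...)`); the permission filter below is only an example:

```python
import asyncio

from planet import Session, data_filter


async def main():
    async with Session() as session:
        cl = session.client('data')
        sfilter = data_filter.permission_filter()  # example filter
        search = await cl.create_search(['PSScene'],
                                        search_filter=sfilter,
                                        name='test',
                                        enable_email=False)
        print(search)


asyncio.run(main())
```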
0.0
60a29560cdacaed036793700a153e710973b1d78
[ "tests/integration/test_data_api.py::test_create_search_basic", "tests/integration/test_data_api.py::test_create_search_email", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_daily_email", "tests/unit/test_subscription_request.py::test_catalog_source_featurecollection" ]
[ "tests/integration/test_data_api.py::test_search_basic", "tests/integration/test_data_api.py::test_search_name", "tests/integration/test_data_api.py::test_search_filter", "tests/integration/test_data_api.py::test_search_sort", "tests/integration/test_data_api.py::test_search_limit", "tests/integration/test_data_api.py::test_get_search_success", "tests/integration/test_data_api.py::test_get_search_id_doesnt_exist", "tests/integration/test_data_api.py::test_update_search_basic", "tests/integration/test_data_api.py::test_list_searches_success[None-4]", "tests/integration/test_data_api.py::test_list_searches_success[3-3]", "tests/integration/test_data_api.py::test_list_searches_sort[created", "tests/integration/test_data_api.py::test_list_searches_searchtype[any-]", "tests/integration/test_data_api.py::test_list_searches_searchtype[saved-?search_type=saved]", "tests/integration/test_data_api.py::test_list_searches_args_do_not_match[DOESNOTEXIST-ANY-expectation0]", "tests/integration/test_data_api.py::test_list_searches_args_do_not_match[CREATED", "tests/integration/test_data_api.py::test_delete_search[204-expectation0]", "tests/integration/test_data_api.py::test_delete_search[404-expectation1]", "tests/integration/test_data_api.py::test_run_search_basic[None-3-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_api.py::test_run_search_basic[None-3-invalid-False]", "tests/integration/test_data_api.py::test_run_search_basic[2-2-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_api.py::test_run_search_basic[2-2-invalid-False]", "tests/integration/test_data_api.py::test_run_search_sort[published", "tests/integration/test_data_api.py::test_run_search_sort[acquired", "tests/integration/test_data_api.py::test_run_search_sort[invalid--False]", "tests/integration/test_data_api.py::test_run_search_doesnotexist", "tests/integration/test_data_api.py::test_get_stats_success", "tests/integration/test_data_api.py::test_get_stats_invalid_interval", "tests/integration/test_data_api.py::test_list_item_assets_success", "tests/integration/test_data_api.py::test_list_item_assets_missing", "tests/integration/test_data_api.py::test_get_asset[basic_udm2-expectation0]", "tests/integration/test_data_api.py::test_get_asset[invalid-expectation1]", "tests/integration/test_data_api.py::test_activate_asset_success[inactive-True]", "tests/integration/test_data_api.py::test_activate_asset_success[active-False]", "tests/integration/test_data_api.py::test_activate_asset_invalid_asset", "tests/integration/test_data_api.py::test_wait_asset_success", "tests/integration/test_data_api.py::test_wait_asset_max_attempts", "tests/integration/test_data_api.py::test_download_asset[False-False]", "tests/integration/test_data_api.py::test_download_asset[True-False]", "tests/integration/test_data_api.py::test_download_asset[True-True]", "tests/integration/test_data_api.py::test_download_asset[False-True]", "tests/integration/test_data_api.py::test_validate_checksum[True-True-expectation0]", "tests/integration/test_data_api.py::test_validate_checksum[False-True-expectation1]", "tests/integration/test_data_api.py::test_validate_checksum[True-False-expectation2]", "tests/integration/test_data_cli.py::test_data_command_registered", "tests/integration/test_data_cli.py::test_data_search_command_registered", "tests/integration/test_data_cli.py::test_data_filter_defaults", "tests/integration/test_data_cli.py::test_data_filter_permission", "tests/integration/test_data_cli.py::test_data_filter_std_quality", 
"tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr-expected0]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr,ortho_analytic_4b_sr-expected1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr", "tests/integration/test_data_cli.py::test_data_filter_date_range_success", "tests/integration/test_data_cli.py::test_data_filter_date_range_invalid", "tests/integration/test_data_cli.py::test_data_filter_geom[geom_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[feature_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[featurecollection_geojson]", "tests/integration/test_data_cli.py::test_data_filter_number_in_success", "tests/integration/test_data_cli.py::test_data_filter_number_in_badparam", "tests/integration/test_data_cli.py::test_data_filter_range", "tests/integration/test_data_cli.py::test_data_filter_string_in", "tests/integration/test_data_cli.py::test_data_filter_update", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[PSScene-True]", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[SkySatScene-True]", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[INVALID-False]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_invalid", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[None-100]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[0-102]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[1-1]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_list_basic[0-4]", "tests/integration/test_data_cli.py::test_data_search_list_basic[3-3]", "tests/integration/test_data_cli.py::test_data_search_list_sort[created", "tests/integration/test_data_cli.py::test_data_search_list_sort[notvalid--False]", "tests/integration/test_data_cli.py::test_data_search_list_searchtype[any--True]", "tests/integration/test_data_cli.py::test_data_search_list_searchtype[saved-?search_type=saved-True]", "tests/integration/test_data_cli.py::test_data_search_list_searchtype[notvalid--False]", "tests/integration/test_data_cli.py::test_data_search_run_basic[0-3-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_cli.py::test_data_search_run_basic[0-3-invalid-False]", "tests/integration/test_data_cli.py::test_data_search_run_basic[2-2-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_cli.py::test_data_search_run_basic[2-2-invalid-False]", "tests/integration/test_data_cli.py::test_data_search_run_sort[published", 
"tests/integration/test_data_cli.py::test_data_search_run_sort[acquired", "tests/integration/test_data_cli.py::test_data_search_run_sort[invalid--False]", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{1:1}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[None-1-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hou-2-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_invalid_interval[hour-0-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene,", "tests/integration/test_data_cli.py::test_search_get", "tests/integration/test_data_cli.py::test_search_get_id_not_found", "tests/integration/test_data_cli.py::test_search_delete_success", "tests/integration/test_data_cli.py::test_search_delete_nonexistant_search_id", "tests/integration/test_data_cli.py::test_search_update_success[PSScene]", "tests/integration/test_data_cli.py::test_search_update_success[SkySatScene]", "tests/integration/test_data_cli.py::test_search_update_success[PSScene,", "tests/integration/test_data_cli.py::test_search_update_fail", "tests/integration/test_data_cli.py::test_asset_download_default[False-False]", "tests/integration/test_data_cli.py::test_asset_download_default[True-False]", "tests/integration/test_data_cli.py::test_asset_download_default[True-True]", "tests/integration/test_data_cli.py::test_asset_download_default[False-True]", "tests/integration/test_data_cli.py::test_asset_activate", "tests/integration/test_data_cli.py::test_asset_wait", "tests/unit/test_session.py::test_session_get_client[data-DataClient]", "tests/unit/test_session.py::test_session_get_client[orders-OrdersClient]", "tests/unit/test_session.py::test_session_get_client[subscriptions-SubscriptionsClient]", 
"tests/unit/test_session.py::test_session_get_client_error", "tests/unit/test_subscription_request.py::test_build_request_success", "tests/unit/test_subscription_request.py::test_catalog_source_success", "tests/unit/test_subscription_request.py::test_catalog_source_invalid_start_time", "tests/unit/test_subscription_request.py::test_amazon_s3_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_success", "tests/unit/test_subscription_request.py::test_notifications_success", "tests/unit/test_subscription_request.py::test_notifications_invalid_topics", "tests/unit/test_subscription_request.py::test_band_math_tool_success", "tests/unit/test_subscription_request.py::test_band_math_tool_invalid_pixel_type", "tests/unit/test_subscription_request.py::test_clip_tool_success", "tests/unit/test_subscription_request.py::test_clip_tool_invalid_type", "tests/unit/test_subscription_request.py::test_file_format_tool_success", "tests/unit/test_subscription_request.py::test_file_format_tool_invalid_format", "tests/unit/test_subscription_request.py::test_harmonize_tool_success", "tests/unit/test_subscription_request.py::test_harmonize_tool_invalid_target_sensor", "tests/unit/test_subscription_request.py::test_reproject_tool_success", "tests/unit/test_subscription_request.py::test_reproject_tool_invalid_kernel", "tests/unit/test_subscription_request.py::test_toar_tool_success" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-03-21 19:51:16+00:00
apache-2.0
4,602
planetlabs__planet-client-python-894
diff --git a/planet/cli/data.py b/planet/cli/data.py index 46a9603..2591b32 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -418,19 +418,27 @@ async def search_run(ctx, search_id, sort, limit, pretty): @click.argument("item_types", type=types.CommaSeparatedString(), callback=check_item_types) [email protected]('interval', type=click.Choice(STATS_INTERVAL)) [email protected]("filter", type=types.JSON()) -async def stats(ctx, item_types, interval, filter): [email protected]( + '--filter', + type=types.JSON(), + required=True, + help="""Filter to apply to search. Can be a json string, filename, + or '-' for stdin.""") [email protected]('--interval', + type=click.Choice(STATS_INTERVAL), + required=True, + help='The size of the histogram date buckets.') +async def stats(ctx, item_types, filter, interval): """Get a bucketed histogram of items matching the filter. This function returns a bucketed histogram of results based on the - item_types, interval, and json filter specified (using file or stdin). + item_types, interval, and filter specified. """ async with data_client(ctx) as cl: items = await cl.get_stats(item_types=item_types, - interval=interval, - search_filter=filter) + search_filter=filter, + interval=interval) echo_json(items)
planetlabs/planet-client-python
c28285bf2b7ff3b2a20688371e3ec0dc834eada7
diff --git a/tests/integration/test_data_cli.py b/tests/integration/test_data_cli.py index 3016472..3bddc70 100644 --- a/tests/integration/test_data_cli.py +++ b/tests/integration/test_data_cli.py @@ -771,16 +771,18 @@ def test_data_stats_invalid_filter(invoke, filter): interval = "hour" item_type = 'PSScene' runner = CliRunner() - result = invoke(["stats", item_type, interval, filter], runner=runner) + result = invoke( + ["stats", item_type, f'--interval={interval}', f'--filter={filter}'], + runner=runner) assert result.exit_code == 2 @respx.mock @pytest.mark.parametrize("item_types", ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) [email protected]("interval, exit_code", [(None, 1), ('hou', 2), [email protected]("interval, exit_code", [(None, 2), ('hou', 2), ('hour', 0)]) -def test_data_stats_invalid_interval(invoke, item_types, interval, exit_code): +def test_data_stats_interval(invoke, item_types, interval, exit_code): """Test for planet data stats. Test with multiple item_types. Test should succeed with valid interval, and fail with invalid interval.""" filter = { @@ -797,10 +799,11 @@ def test_data_stats_invalid_interval(invoke, item_types, interval, exit_code): }]}) respx.post(TEST_STATS_URL).return_value = mock_resp - runner = CliRunner() - result = invoke(["stats", item_types, interval, json.dumps(filter)], - runner=runner) + args = ["stats", item_types, f'--filter={json.dumps(filter)}'] + if interval: + args.append(f'--interval={interval}') + result = invoke(args) assert result.exit_code == exit_code @@ -828,9 +831,12 @@ def test_data_stats_success(invoke, item_types, interval): }]}) respx.post(TEST_STATS_URL).return_value = mock_resp - runner = CliRunner() - result = invoke(["stats", item_types, interval, json.dumps(filter)], - runner=runner) + result = invoke([ + "stats", + item_types, + f'--interval={interval}', + f'--filter={json.dumps(filter)}' + ]) assert result.exit_code == 0
Update `planet data stats` CLI and Python API commands to match `search`

**Expected behavior**

UI of `planet data stats` and `planet data search` should be similar.

**Actual behavior (describe the problem)**

`planet data search` has filter as an option (`--filter`); `planet data stats` has interval and filter as arguments.

**Proposed Solution**

Update the `stats` CLI and Python API commands to match `search`. The expected command would be `planet data stats PSScene --filter - --interval day` (assuming a filter is piped in).
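On the Python side the reworked CLI handler calls `DataClient.get_stats` with keyword arguments (`item_types`, `search_filter`, `interval`). A usage sketch with a placeholder filter:

```python
import asyncio

from planet import Session, data_filter


async def main():
    async with Session() as session:
        cl = session.client('data')
        sfilter = data_filter.permission_filter()  # placeholder filter
        stats = await cl.get_stats(item_types=['PSScene'],
                                   search_filter=sfilter,
                                   interval='day')
        print(stats)


asyncio.run(main())
```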
0.0
c28285bf2b7ff3b2a20688371e3ec0dc834eada7
[ "tests/integration/test_data_cli.py::test_data_stats_interval[hour-0-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hour-0-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hour-0-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene," ]
[ "tests/integration/test_data_cli.py::test_data_command_registered", "tests/integration/test_data_cli.py::test_data_search_command_registered", "tests/integration/test_data_cli.py::test_data_filter_defaults", "tests/integration/test_data_cli.py::test_data_filter_permission", "tests/integration/test_data_cli.py::test_data_filter_std_quality", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr-expected0]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr,ortho_analytic_4b_sr-expected1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr", "tests/integration/test_data_cli.py::test_data_filter_date_range_success", "tests/integration/test_data_cli.py::test_data_filter_date_range_invalid", "tests/integration/test_data_cli.py::test_data_filter_geom[geom_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[feature_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[featurecollection_geojson]", "tests/integration/test_data_cli.py::test_data_filter_number_in_success", "tests/integration/test_data_cli.py::test_data_filter_number_in_badparam", "tests/integration/test_data_cli.py::test_data_filter_range", "tests/integration/test_data_cli.py::test_data_filter_string_in", "tests/integration/test_data_cli.py::test_data_filter_update", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[PSScene-True]", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[SkySatScene-True]", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[INVALID-False]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_invalid", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[None-100]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[0-102]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[1-1]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_daily_email", "tests/integration/test_data_cli.py::test_data_search_list_basic[0-4]", "tests/integration/test_data_cli.py::test_data_search_list_basic[3-3]", "tests/integration/test_data_cli.py::test_data_search_list_sort[created", "tests/integration/test_data_cli.py::test_data_search_list_sort[notvalid--False]", "tests/integration/test_data_cli.py::test_data_search_list_searchtype[any--True]", 
"tests/integration/test_data_cli.py::test_data_search_list_searchtype[saved-?search_type=saved-True]", "tests/integration/test_data_cli.py::test_data_search_list_searchtype[notvalid--False]", "tests/integration/test_data_cli.py::test_data_search_run_basic[0-3-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_cli.py::test_data_search_run_basic[0-3-invalid-False]", "tests/integration/test_data_cli.py::test_data_search_run_basic[2-2-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_cli.py::test_data_search_run_basic[2-2-invalid-False]", "tests/integration/test_data_cli.py::test_data_search_run_sort[published", "tests/integration/test_data_cli.py::test_data_search_run_sort[acquired", "tests/integration/test_data_cli.py::test_data_search_run_sort[invalid--False]", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{1:1}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_stats_interval[None-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[None-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[None-2-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_interval[hou-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hou-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hou-2-PSScene,", "tests/integration/test_data_cli.py::test_search_get", "tests/integration/test_data_cli.py::test_search_get_id_not_found", "tests/integration/test_data_cli.py::test_search_delete_success", "tests/integration/test_data_cli.py::test_search_delete_nonexistant_search_id", "tests/integration/test_data_cli.py::test_search_update_success[PSScene]", "tests/integration/test_data_cli.py::test_search_update_success[SkySatScene]", "tests/integration/test_data_cli.py::test_search_update_success[PSScene,", "tests/integration/test_data_cli.py::test_search_update_fail", "tests/integration/test_data_cli.py::test_asset_download_default[False-False]", "tests/integration/test_data_cli.py::test_asset_download_default[True-False]", "tests/integration/test_data_cli.py::test_asset_download_default[True-True]", "tests/integration/test_data_cli.py::test_asset_download_default[False-True]", "tests/integration/test_data_cli.py::test_asset_activate", "tests/integration/test_data_cli.py::test_asset_wait" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2023-03-23 01:12:11+00:00
apache-2.0
4,603
planetlabs__planet-client-python-896
diff --git a/planet/cli/data.py b/planet/cli/data.py index 46a9603..2591b32 100644 --- a/planet/cli/data.py +++ b/planet/cli/data.py @@ -418,19 +418,27 @@ async def search_run(ctx, search_id, sort, limit, pretty): @click.argument("item_types", type=types.CommaSeparatedString(), callback=check_item_types) [email protected]('interval', type=click.Choice(STATS_INTERVAL)) [email protected]("filter", type=types.JSON()) -async def stats(ctx, item_types, interval, filter): [email protected]( + '--filter', + type=types.JSON(), + required=True, + help="""Filter to apply to search. Can be a json string, filename, + or '-' for stdin.""") [email protected]('--interval', + type=click.Choice(STATS_INTERVAL), + required=True, + help='The size of the histogram date buckets.') +async def stats(ctx, item_types, filter, interval): """Get a bucketed histogram of items matching the filter. This function returns a bucketed histogram of results based on the - item_types, interval, and json filter specified (using file or stdin). + item_types, interval, and filter specified. """ async with data_client(ctx) as cl: items = await cl.get_stats(item_types=item_types, - interval=interval, - search_filter=filter) + search_filter=filter, + interval=interval) echo_json(items) diff --git a/planet/clients/orders.py b/planet/clients/orders.py index 584df8d..e5e8334 100644 --- a/planet/clients/orders.py +++ b/planet/clients/orders.py @@ -265,8 +265,7 @@ class OrdersClient: order_id: str, directory: Path = Path('.'), overwrite: bool = False, - progress_bar: bool = False, - checksum: Optional[str] = None) -> List[Path]: + progress_bar: bool = False) -> List[Path]: """Download all assets in an order. Parameters:
planetlabs/planet-client-python
c28285bf2b7ff3b2a20688371e3ec0dc834eada7
diff --git a/tests/integration/test_data_cli.py b/tests/integration/test_data_cli.py index 3016472..3bddc70 100644 --- a/tests/integration/test_data_cli.py +++ b/tests/integration/test_data_cli.py @@ -771,16 +771,18 @@ def test_data_stats_invalid_filter(invoke, filter): interval = "hour" item_type = 'PSScene' runner = CliRunner() - result = invoke(["stats", item_type, interval, filter], runner=runner) + result = invoke( + ["stats", item_type, f'--interval={interval}', f'--filter={filter}'], + runner=runner) assert result.exit_code == 2 @respx.mock @pytest.mark.parametrize("item_types", ['PSScene', 'SkySatScene', 'PSScene, SkySatScene']) [email protected]("interval, exit_code", [(None, 1), ('hou', 2), [email protected]("interval, exit_code", [(None, 2), ('hou', 2), ('hour', 0)]) -def test_data_stats_invalid_interval(invoke, item_types, interval, exit_code): +def test_data_stats_interval(invoke, item_types, interval, exit_code): """Test for planet data stats. Test with multiple item_types. Test should succeed with valid interval, and fail with invalid interval.""" filter = { @@ -797,10 +799,11 @@ def test_data_stats_invalid_interval(invoke, item_types, interval, exit_code): }]}) respx.post(TEST_STATS_URL).return_value = mock_resp - runner = CliRunner() - result = invoke(["stats", item_types, interval, json.dumps(filter)], - runner=runner) + args = ["stats", item_types, f'--filter={json.dumps(filter)}'] + if interval: + args.append(f'--interval={interval}') + result = invoke(args) assert result.exit_code == exit_code @@ -828,9 +831,12 @@ def test_data_stats_success(invoke, item_types, interval): }]}) respx.post(TEST_STATS_URL).return_value = mock_resp - runner = CliRunner() - result = invoke(["stats", item_types, interval, json.dumps(filter)], - runner=runner) + result = invoke([ + "stats", + item_types, + f'--interval={interval}', + f'--filter={json.dumps(filter)}' + ]) assert result.exit_code == 0
Remove checksum argument from orders.download_order()

`orders.download_order()` has a `checksum` argument that is never used, which may mislead a user who passes `checksum=True`.

- In the SDK, checksum validation is done in a separate call (e.g., `orders.download_order()` followed by `orders.validate_checksum()`).
- In the CLI, the checksum is just a flag baked into the CLI's functionality.

**To do:** Remove the `checksum` argument from `orders.download_order()` to avoid confusion.
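A minimal sketch of the two-step flow described above, downloading first and validating checksums separately. The order id is a placeholder, and the `validate_checksum(directory, checksum)` signature and download directory layout are assumptions; only `download_order` appears in the patch above.

```python
import asyncio
from pathlib import Path

from planet import Session
from planet.clients.orders import OrdersClient


async def main():
    async with Session() as session:
        client = OrdersClient(session)
        # Download without any checksum flag...
        await client.download_order("0123-4567",  # placeholder order id
                                    directory=Path("downloads"),
                                    progress_bar=True)
    # ...then validate as a separate, explicit step (signature assumed).
    OrdersClient.validate_checksum(Path("downloads"), "MD5")


asyncio.run(main())
```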
0.0
c28285bf2b7ff3b2a20688371e3ec0dc834eada7
[ "tests/integration/test_data_cli.py::test_data_stats_interval[hour-0-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hour-0-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hour-0-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[hour-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[day-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[week-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[month-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_success[year-PSScene," ]
[ "tests/integration/test_data_cli.py::test_data_command_registered", "tests/integration/test_data_cli.py::test_data_search_command_registered", "tests/integration/test_data_cli.py::test_data_filter_defaults", "tests/integration/test_data_cli.py::test_data_filter_permission", "tests/integration/test_data_cli.py::test_data_filter_std_quality", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr-expected0]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr,ortho_analytic_4b_sr-expected1]", "tests/integration/test_data_cli.py::test_data_filter_asset[ortho_analytic_8b_sr", "tests/integration/test_data_cli.py::test_data_filter_date_range_success", "tests/integration/test_data_cli.py::test_data_filter_date_range_invalid", "tests/integration/test_data_cli.py::test_data_filter_geom[geom_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[feature_geojson]", "tests/integration/test_data_cli.py::test_data_filter_geom[featurecollection_geojson]", "tests/integration/test_data_cli.py::test_data_filter_number_in_success", "tests/integration/test_data_cli.py::test_data_filter_number_in_badparam", "tests/integration/test_data_cli.py::test_data_filter_range", "tests/integration/test_data_cli.py::test_data_filter_string_in", "tests/integration/test_data_cli.py::test_data_filter_update", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[PSScene-True]", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[SkySatScene-True]", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[PSScene,", "tests/integration/test_data_cli.py::test_data_search_cmd_item_types[INVALID-False]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[{1:1}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_invalid_json[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_cmd_filter_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_success", "tests/integration/test_data_cli.py::test_data_search_cmd_sort_invalid", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[None-100]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[0-102]", "tests/integration/test_data_cli.py::test_data_search_cmd_limit[1-1]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{1:1}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[SkySatScene-{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_search_create_filter_invalid_json[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[SkySatScene]", "tests/integration/test_data_cli.py::test_data_search_create_filter_success[PSScene,", "tests/integration/test_data_cli.py::test_data_search_create_daily_email", "tests/integration/test_data_cli.py::test_data_search_list_basic[0-4]", "tests/integration/test_data_cli.py::test_data_search_list_basic[3-3]", "tests/integration/test_data_cli.py::test_data_search_list_sort[created", "tests/integration/test_data_cli.py::test_data_search_list_sort[notvalid--False]", "tests/integration/test_data_cli.py::test_data_search_list_searchtype[any--True]", 
"tests/integration/test_data_cli.py::test_data_search_list_searchtype[saved-?search_type=saved-True]", "tests/integration/test_data_cli.py::test_data_search_list_searchtype[notvalid--False]", "tests/integration/test_data_cli.py::test_data_search_run_basic[0-3-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_cli.py::test_data_search_run_basic[0-3-invalid-False]", "tests/integration/test_data_cli.py::test_data_search_run_basic[2-2-286469f0b27c476e96c3c4e561f59664-True]", "tests/integration/test_data_cli.py::test_data_search_run_basic[2-2-invalid-False]", "tests/integration/test_data_cli.py::test_data_search_run_sort[published", "tests/integration/test_data_cli.py::test_data_search_run_sort[acquired", "tests/integration/test_data_cli.py::test_data_search_run_sort[invalid--False]", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{1:1}]", "tests/integration/test_data_cli.py::test_data_stats_invalid_filter[{\"foo\"}]", "tests/integration/test_data_cli.py::test_data_stats_interval[None-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[None-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[None-2-PSScene,", "tests/integration/test_data_cli.py::test_data_stats_interval[hou-2-PSScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hou-2-SkySatScene]", "tests/integration/test_data_cli.py::test_data_stats_interval[hou-2-PSScene,", "tests/integration/test_data_cli.py::test_search_get", "tests/integration/test_data_cli.py::test_search_get_id_not_found", "tests/integration/test_data_cli.py::test_search_delete_success", "tests/integration/test_data_cli.py::test_search_delete_nonexistant_search_id", "tests/integration/test_data_cli.py::test_search_update_success[PSScene]", "tests/integration/test_data_cli.py::test_search_update_success[SkySatScene]", "tests/integration/test_data_cli.py::test_search_update_success[PSScene,", "tests/integration/test_data_cli.py::test_search_update_fail", "tests/integration/test_data_cli.py::test_asset_download_default[False-False]", "tests/integration/test_data_cli.py::test_asset_download_default[True-False]", "tests/integration/test_data_cli.py::test_asset_download_default[True-True]", "tests/integration/test_data_cli.py::test_asset_download_default[False-True]", "tests/integration/test_data_cli.py::test_asset_activate", "tests/integration/test_data_cli.py::test_asset_wait" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2023-03-23 17:39:45+00:00
apache-2.0
4,604
planetlabs__planet-client-python-977
diff --git a/CHANGES.txt b/CHANGES.txt index 2255bc0..105eff2 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,7 +1,10 @@ 2.1.0 (TBD) Added: -- Add the option to get Planetary Variable subscription results as a CSV file (). +- Support for catalog source publishing stages has been added to + subscription_request.catalog_source (#977). +- Add the option to get Planetary Variable subscription results as a CSV file + (#981). - A subscription_request.planetary_variable_source function has been added (#976). - The subscription_request.build_request function has a new option to clip to diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index 772894c..281041a 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -85,3 +85,6 @@ section.mdx-container::before{ } } +.highlight .gp, .highlight .go { /* Generic.Prompt, Generic.Output */ + user-select: none; + } \ No newline at end of file diff --git a/planet/subscription_request.py b/planet/subscription_request.py index f0e7d8d..4d494ec 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -13,7 +13,7 @@ # the License. """Functionality for preparing subscription requests.""" from datetime import datetime -from typing import Any, Dict, Optional, List, Literal, Mapping +from typing import Any, Dict, Optional, List, Literal, Mapping, Sequence from . import geojson, specs from .exceptions import ClientError @@ -151,6 +151,9 @@ def catalog_source( filter: Optional[Mapping] = None, end_time: Optional[datetime] = None, rrule: Optional[str] = None, + publishing_stages: Optional[Sequence[Literal["preview", + "standard", + "finalized"]]] = None, ) -> dict: """Construct a Catalog subscription source. @@ -173,6 +176,8 @@ def catalog_source( the past or future, and must be after the start_time. rrule: The recurrence rule, given in iCalendar RFC 5545 format. Only monthly recurrences are supported at this time. + publishing_stages: A sequence of one or more of the values + "preview", "standard", or "finalized". Returns: dict: a representation of a subscription source. @@ -180,6 +185,29 @@ def catalog_source( Raises: ClientError: if a source can not be configured. + + Examples: + ```pycon + >>> source = catalog_source( + ... ["PSScene"], + ... ["ortho_analytic_4b"], + ... geometry={ + ... "type": "Polygon", + ... "coordinates": [[[37.791595458984375, 14.84923123791421], + ... [37.90214538574219, 14.84923123791421], + ... [37.90214538574219, 14.945448293647944], + ... [37.791595458984375, 14.945448293647944], + ... [37.791595458984375, 14.84923123791421]]] + ... }, + ... start_time=datetime(2021, 3, 1), + ... publishing_stages=["standard"], + ... ) + >>> request = build_request( + ... "Standard PSScene Ortho Analytic", + ... source=source, + ... delivery={}) + ``` + """ if len(item_types) > 1: raise ClientError( @@ -216,6 +244,9 @@ def catalog_source( if rrule: parameters['rrule'] = rrule + if publishing_stages: + parameters['publishing_stages'] = list(set(publishing_stages)) + return {"type": "catalog", "parameters": parameters} @@ -275,7 +306,10 @@ def planetary_variable_source( ... }, ... start_time=datetime(2021, 3, 1) ... ) - >>> request = build_request(source=source, ...) + >>> request = build_request( + ... "Soil Water Content", + ... source=source, + ... delivery={}) ``` """ # TODO: validation of variable types and ids.
planetlabs/planet-client-python
30cb1c0adb8349d9e3ea6831a70f61924f0e560f
diff --git a/tests/unit/test_subscription_request.py b/tests/unit/test_subscription_request.py index d24dbd3..34cd5fa 100644 --- a/tests/unit/test_subscription_request.py +++ b/tests/unit/test_subscription_request.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime +import itertools import logging import pytest @@ -369,3 +370,27 @@ def test_pv_source_success(geom_geojson, var_type, var_id): assert params["id"] == var_id assert params["geometry"] == geom_geojson assert params["start_time"].startswith("2021-03-01") + + [email protected]( + # Test all the combinations of the three options plus some with dupes. + "publishing_stages", + list( + itertools.chain.from_iterable( + itertools.combinations(["preview", "standard", "finalized"], i) + for i in range(1, 4))) + [("preview", "preview"), + ("preview", "finalized", "preview")]) +def test_catalog_source_publishing_stages(publishing_stages, geom_geojson): + """Configure publishing stages for a catalog source.""" + source = subscription_request.catalog_source( + item_types=["PSScene"], + asset_types=["ortho_analytic_4b"], + geometry=geom_geojson, + start_time=datetime(2021, 3, 1), + end_time=datetime(2023, 11, 1), + rrule="FREQ=MONTHLY;BYMONTH=3,4,5,6,7,8,9,10", + publishing_stages=publishing_stages, + ) + + assert source["parameters"]["publishing_stages"] == list( + set(publishing_stages))
Add publishing_stages to catalog source parameters

It is a list of one or more of the choices "preview", "standard", and "finalized". See https://developers.planet.com/docs/subscriptions/reference/#tag/subscriptions/operation/createSubscription.
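Usage, adapted from the docstring example the patch above adds; `delivery={}` is the patch's own placeholder, and duplicates in `publishing_stages` are dropped by the function.

```python
from datetime import datetime

from planet.subscription_request import build_request, catalog_source

source = catalog_source(
    ["PSScene"],
    ["ortho_analytic_4b"],
    geometry={
        "type": "Polygon",
        "coordinates": [[[37.791595458984375, 14.84923123791421],
                         [37.90214538574219, 14.84923123791421],
                         [37.90214538574219, 14.945448293647944],
                         [37.791595458984375, 14.945448293647944],
                         [37.791595458984375, 14.84923123791421]]],
    },
    start_time=datetime(2021, 3, 1),
    # Any subset of "preview", "standard", "finalized".
    publishing_stages=["standard"],
)
request = build_request("Standard PSScene Ortho Analytic",
                        source=source,
                        delivery={})
```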
0.0
30cb1c0adb8349d9e3ea6831a70f61924f0e560f
[ "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages0]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages1]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages2]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages3]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages4]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages5]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages6]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages7]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages8]" ]
[ "tests/unit/test_subscription_request.py::test_build_request_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_failure", "tests/unit/test_subscription_request.py::test_catalog_source_success", "tests/unit/test_subscription_request.py::test_catalog_source_featurecollection", "tests/unit/test_subscription_request.py::test_catalog_source_invalid_start_time", "tests/unit/test_subscription_request.py::test_amazon_s3_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_success", "tests/unit/test_subscription_request.py::test_notifications_success", "tests/unit/test_subscription_request.py::test_notifications_invalid_topics", "tests/unit/test_subscription_request.py::test_band_math_tool_success", "tests/unit/test_subscription_request.py::test_band_math_tool_invalid_pixel_type", "tests/unit/test_subscription_request.py::test_clip_tool_success", "tests/unit/test_subscription_request.py::test_clip_tool_invalid_type", "tests/unit/test_subscription_request.py::test_file_format_tool_success", "tests/unit/test_subscription_request.py::test_file_format_tool_invalid_format", "tests/unit/test_subscription_request.py::test_harmonize_tool_success", "tests/unit/test_subscription_request.py::test_harmonize_tool_invalid_target_sensor", "tests/unit/test_subscription_request.py::test_reproject_tool_success", "tests/unit/test_subscription_request.py::test_reproject_tool_invalid_kernel", "tests/unit/test_subscription_request.py::test_toar_tool_success", "tests/unit/test_subscription_request.py::test_pv_source_success[biomass_proxy-BIOMASS-PROXY_V3.0_10]", "tests/unit/test_subscription_request.py::test_pv_source_success[var1-VAR1-ABCD]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-07 22:07:02+00:00
apache-2.0
4,605
planetlabs__planet-client-python-978
diff --git a/CHANGES.txt b/CHANGES.txt index 105eff2..2c6d297 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,8 +1,8 @@ 2.1.0 (TBD) Added: -- Support for catalog source publishing stages has been added to - subscription_request.catalog_source (#977). +- Support for catalog source publishing stages (#977) and time range types + (#978) have been added to subscription_request.catalog_source. - Add the option to get Planetary Variable subscription results as a CSV file (#981). - A subscription_request.planetary_variable_source function has been added diff --git a/planet/subscription_request.py b/planet/subscription_request.py index 4d494ec..75db02a 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -154,6 +154,7 @@ def catalog_source( publishing_stages: Optional[Sequence[Literal["preview", "standard", "finalized"]]] = None, + time_range_type: Optional[Literal["acquired", "published"]] = None, ) -> dict: """Construct a Catalog subscription source. @@ -201,6 +202,7 @@ def catalog_source( ... }, ... start_time=datetime(2021, 3, 1), ... publishing_stages=["standard"], + ... time_range_type="acquired", ... ) >>> request = build_request( ... "Standard PSScene Ortho Analytic", @@ -247,6 +249,9 @@ def catalog_source( if publishing_stages: parameters['publishing_stages'] = list(set(publishing_stages)) + if time_range_type: + parameters['time_range_type'] = time_range_type + return {"type": "catalog", "parameters": parameters}
planetlabs/planet-client-python
699f15b9b523fddd942d4d87bb3a57c6a00f5ffd
diff --git a/tests/unit/test_subscription_request.py b/tests/unit/test_subscription_request.py index 34cd5fa..1b09afa 100644 --- a/tests/unit/test_subscription_request.py +++ b/tests/unit/test_subscription_request.py @@ -394,3 +394,16 @@ def test_catalog_source_publishing_stages(publishing_stages, geom_geojson): assert source["parameters"]["publishing_stages"] == list( set(publishing_stages)) + + +def test_catalog_source_time_range_type_acquired(geom_geojson): + """Configure 'acquired' time range type for a catalog source.""" + source = subscription_request.catalog_source( + item_types=["PSScene"], + asset_types=["ortho_analytic_4b"], + geometry=geom_geojson, + start_time=datetime(2021, 3, 1), + time_range_type="acquired", + ) + + assert source["parameters"]["time_range_type"] == "acquired"
Add time_range_type parameter to subscription source params

The new parameter is a string, either "acquired" or "published". This is a new API feature; previously, "published" was the only option.
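A short usage sketch following the test added above; the AOI polygon is a hypothetical placeholder.

```python
from datetime import datetime

from planet.subscription_request import catalog_source

source = catalog_source(
    ["PSScene"],
    ["ortho_analytic_4b"],
    # Hypothetical AOI; any GeoJSON geometry mapping is accepted.
    geometry={"type": "Polygon",
              "coordinates": [[[37.79, 14.85], [37.90, 14.85],
                               [37.90, 14.95], [37.79, 14.95],
                               [37.79, 14.85]]]},
    start_time=datetime(2021, 3, 1),
    time_range_type="acquired",  # or "published", the previous behavior
)
assert source["parameters"]["time_range_type"] == "acquired"
```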
0.0
699f15b9b523fddd942d4d87bb3a57c6a00f5ffd
[ "tests/unit/test_subscription_request.py::test_catalog_source_time_range_type_acquired" ]
[ "tests/unit/test_subscription_request.py::test_build_request_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_failure", "tests/unit/test_subscription_request.py::test_catalog_source_success", "tests/unit/test_subscription_request.py::test_catalog_source_featurecollection", "tests/unit/test_subscription_request.py::test_catalog_source_invalid_start_time", "tests/unit/test_subscription_request.py::test_amazon_s3_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_success", "tests/unit/test_subscription_request.py::test_notifications_success", "tests/unit/test_subscription_request.py::test_notifications_invalid_topics", "tests/unit/test_subscription_request.py::test_band_math_tool_success", "tests/unit/test_subscription_request.py::test_band_math_tool_invalid_pixel_type", "tests/unit/test_subscription_request.py::test_clip_tool_success", "tests/unit/test_subscription_request.py::test_clip_tool_invalid_type", "tests/unit/test_subscription_request.py::test_file_format_tool_success", "tests/unit/test_subscription_request.py::test_file_format_tool_invalid_format", "tests/unit/test_subscription_request.py::test_harmonize_tool_success", "tests/unit/test_subscription_request.py::test_harmonize_tool_invalid_target_sensor", "tests/unit/test_subscription_request.py::test_reproject_tool_success", "tests/unit/test_subscription_request.py::test_reproject_tool_invalid_kernel", "tests/unit/test_subscription_request.py::test_toar_tool_success", "tests/unit/test_subscription_request.py::test_pv_source_success[biomass_proxy-BIOMASS-PROXY_V3.0_10]", "tests/unit/test_subscription_request.py::test_pv_source_success[var1-VAR1-ABCD]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages0]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages1]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages2]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages3]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages4]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages5]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages6]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages7]", "tests/unit/test_subscription_request.py::test_catalog_source_publishing_stages[publishing_stages8]" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-07 23:51:21+00:00
apache-2.0
4,606
planetlabs__planet-client-python-981
diff --git a/CHANGES.txt b/CHANGES.txt index 44c93cc..2255bc0 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,9 +1,12 @@ 2.1.0 (TBD) Added: +- Add the option to get Planetary Variable subscription results as a CSV file (). +- A subscription_request.planetary_variable_source function has been added + (#976). - The subscription_request.build_request function has a new option to clip to - the subscription's source geometry. This is a preview of the default - behavior of the next version of the Subscriptions API. + the subscription's source geometry. This is a preview of the default behavior + of the next version of the Subscriptions API (#971). 2.0.3 (2023-06-28) diff --git a/planet/cli/subscriptions.py b/planet/cli/subscriptions.py index f97671e..6816d15 100644 --- a/planet/cli/subscriptions.py +++ b/planet/cli/subscriptions.py @@ -162,10 +162,13 @@ async def get_subscription_cmd(ctx, subscription_id, pretty): "success"]), multiple=True, default=None, - callback=lambda ctx, - param, - value: set(value), + callback=(lambda ctx, param, value: set(value)), help="Select subscription results in one or more states. Default: all.") [email protected]('--csv', + 'csv_flag', + is_flag=True, + default=False, + help="Get subscription results as an unpaged CSV file.") @limit # TODO: the following 3 options. # –created: timestamp instant or range. @@ -178,13 +181,20 @@ async def list_subscription_results_cmd(ctx, subscription_id, pretty, status, + csv_flag, limit): """Gets results of a subscription and prints the API response.""" async with subscriptions_client(ctx) as client: - async for result in client.get_results(subscription_id, - status=status, - limit=limit): - echo_json(result, pretty) + if csv_flag: + async for result in client.get_results_csv(subscription_id, + status=status, + limit=limit): + click.echo(result) + else: + async for result in client.get_results(subscription_id, + status=status, + limit=limit): + echo_json(result, pretty) @subscriptions.command() # type: ignore diff --git a/planet/clients/subscriptions.py b/planet/clients/subscriptions.py index 1e44485..99a93e2 100644 --- a/planet/clients/subscriptions.py +++ b/planet/clients/subscriptions.py @@ -1,7 +1,7 @@ """Planet Subscriptions API Python client.""" import logging -from typing import AsyncIterator, Optional, Set +from typing import AsyncIterator, Literal, Optional, Sequence from planet.exceptions import APIError, ClientError from planet.http import Session @@ -58,7 +58,7 @@ class SubscriptionsClient: self._base_url = self._base_url[:-1] async def list_subscriptions(self, - status: Optional[Set[str]] = None, + status: Optional[Sequence[str]] = None, limit: int = 100) -> AsyncIterator[dict]: """Iterate over list of account subscriptions with optional filtering. @@ -216,16 +216,21 @@ class SubscriptionsClient: async def get_results(self, subscription_id: str, - status: Optional[Set[str]] = None, + status: Optional[Sequence[Literal[ + "created", + "queued", + "processing", + "failed", + "success"]]] = None, limit: int = 100) -> AsyncIterator[dict]: """Iterate over results of a Subscription. - Note: + Notes: The name of this method is based on the API's method name. This method provides iteration over results, it does not get a single result description or return a list of descriptions. - Args: + Parameters: subscription_id (str): id of a subscription. status (Set[str]): pass result with status in this set, filter out results with status not in this set. 
@@ -252,7 +257,6 @@ class SubscriptionsClient: resp = await self._session.request(method='GET', url=url, params=params) - async for sub in _ResultsPager(resp, self._session.request, limit=limit): @@ -263,3 +267,45 @@ class SubscriptionsClient: raise except ClientError: # pragma: no cover raise + + async def get_results_csv(self, + subscription_id: str, + status: Optional[Sequence[Literal[ + "created", + "queued", + "processing", + "failed", + "success"]]] = None, + **kwargs) -> AsyncIterator[str]: + """Iterate over rows of results CSV for a Subscription. + + Notes: + The name of this method is based on the API's method name. This + method provides iteration over results, it does not get a + single result description or return a list of descriptions. + + Parameters: + subscription_id (str): id of a subscription. + status (Set[str]): pass result with status in this set, + filter out results with status not in this set. + TODO: created, updated, completed, user_id + + Yields: + str: a row from a CSV file. + + Raises: + APIError: on an API server error. + ClientError: on a client error. + """ + url = f'{self._base_url}/{subscription_id}/results' + params = {'status': [val for val in status or {}], 'format': 'csv'} + + # Note: retries are not implemented yet. This project has + # retry logic for HTTP requests, but does not handle errors + # during streaming. We may want to consider a retry decorator + # for this entire method a la stamina: + # https://github.com/hynek/stamina. + async with self._session._client.stream('GET', url, + params=params) as response: + async for line in response.aiter_lines(): + yield line diff --git a/planet/subscription_request.py b/planet/subscription_request.py index 391f168..f0e7d8d 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -13,7 +13,7 @@ # the License. """Functionality for preparing subscription requests.""" from datetime import datetime -from typing import Any, Dict, Optional, List, Mapping +from typing import Any, Dict, Optional, List, Literal, Mapping from . import geojson, specs from .exceptions import ClientError @@ -49,7 +49,7 @@ def build_request(name: str, delivery: Mapping, notifications: Optional[Mapping] = None, tools: Optional[List[Mapping]] = None, - clip_to_source=False) -> dict: + clip_to_source: Optional[bool] = False) -> dict: """Construct a Subscriptions API request. The return value can be passed to @@ -73,12 +73,12 @@ def build_request(name: str, behavior. Returns: - A Python dict representation of a Subscriptions API request for - a new subscription. + dict: a representation of a Subscriptions API request for + a new subscription. Raises: - ClientError when a valid Subscriptions API request can't be - constructed. + ClientError: when a valid Subscriptions API request can't be + constructed. Examples: ```python @@ -152,27 +152,34 @@ def catalog_source( end_time: Optional[datetime] = None, rrule: Optional[str] = None, ) -> dict: - """Catalog subscription source. + """Construct a Catalog subscription source. + + The return value can be passed to + [planet.subscription_request.build_request][]. Parameters: - item_types: The class of spacecraft and processing level of the - subscription's matching items, e.g. PSScene. - asset_types: The data products which will be delivered for all subscription - matching items. An item will only match and deliver if all specified - asset types are published for that item. - geometry: The area of interest of the subscription that will be used to - determine matches. 
- start_time: The start time of the subscription. This time can be in the - past or future. - filter: The filter criteria based on item-level metadata. - end_time: The end time of the subscription. This time can be in the past or - future, and must be after the start_time. - rrule: The recurrence rule, given in iCalendar RFC 5545 format. Only - monthly recurrences are supported at this time. + item_types: The class of spacecraft and processing level of the + subscription's matching items, e.g. PSScene. + asset_types: The data products which will be delivered for all + subscription matching items. An item will only match and + deliver if all specified asset types are published for that + item. + geometry: The area of interest of the subscription that will be + used to determine matches. + start_time: The start time of the subscription. This time can be + in the past or future. + filter: The filter criteria based on item-level metadata. + end_time: The end time of the subscription. This time can be in + the past or future, and must be after the start_time. + rrule: The recurrence rule, given in iCalendar RFC 5545 format. + Only monthly recurrences are supported at this time. + + Returns: + dict: a representation of a subscription source. Raises: - planet.exceptions.ClientError: If start_time or end_time are not valid - datetimes + ClientError: if a source can not be + configured. """ if len(item_types) > 1: raise ClientError( @@ -212,6 +219,86 @@ def catalog_source( return {"type": "catalog", "parameters": parameters} +def planetary_variable_source( + var_type: Literal["biomass_proxy", + "land_surface_temperature", + "soil_water_content", + "vegetation_optical_depth"], + var_id: str, + geometry: Mapping, + start_time: datetime, + end_time: Optional[datetime] = None, +) -> dict: + """Construct a Planetary Variable subscription source. + + Planetary Variables come in 4 types and are further subdivided + within these types. See [Subscribing to Planetary + Variables](https://developers.planet.com/docs/subscriptions/pvs-subs/#planetary-variables-types-and-ids) + for details. + + The return value can be passed to + [planet.subscription_request.build_request][]. + + Note: this function does not validate variable types and ids. + + Parameters: + var_type: one of "biomass_proxy", "land_surface_temperature", + "soil_water_content", or "vegetation_optical_depth". + var_id: a value such as "SWC-AMSR2-C_V1.0_100" for soil water + content derived from AMSR2 C band. + geometry: The area of interest of the subscription that will be + used to determine matches. + start_time: The start time of the subscription. This time can be + in the past or future. + end_time: The end time of the subscription. This time can be in + the past or future, and must be after the start_time. + + Returns: + dict: a representation of a subscription source. + + Raises: + ClientError: if a source can not be + configured. + + Examples: + ```python + >>> source = planetary_variable_source( + ... "soil_water_content", + ... "SWC-AMSR2-C_V1.0_100", + ... geometry={ + ... "type": "Polygon", + ... "coordinates": [[[37.791595458984375, 14.84923123791421], + ... [37.90214538574219, 14.84923123791421], + ... [37.90214538574219, 14.945448293647944], + ... [37.791595458984375, 14.945448293647944], + ... [37.791595458984375, 14.84923123791421]]] + ... }, + ... start_time=datetime(2021, 3, 1) + ... ) + >>> request = build_request(source=source, ...) + ``` + """ + # TODO: validation of variable types and ids. 
+ + parameters = { + "id": var_id, + "geometry": geojson.as_geom(dict(geometry)), + } + + try: + parameters['start_time'] = _datetime_to_rfc3339(start_time) + except AttributeError: + raise ClientError('Could not convert start_time to an iso string') + + if end_time: + try: + parameters['end_time'] = _datetime_to_rfc3339(end_time) + except AttributeError: + raise ClientError('Could not convert end_time to an iso string') + + return {"type": var_type, "parameters": parameters} + + def _datetime_to_rfc3339(value: datetime) -> str: """Converts the datetime to an RFC3339 string""" iso = value.isoformat()
planetlabs/planet-client-python
79d9a3cb952fcd4f75e5e935ee067455580c779d
diff --git a/tests/integration/test_subscriptions_api.py b/tests/integration/test_subscriptions_api.py index e1ea533..7046c02 100644 --- a/tests/integration/test_subscriptions_api.py +++ b/tests/integration/test_subscriptions_api.py @@ -128,7 +128,12 @@ def result_pages(status=None, size=40): # must disable the default. res_api_mock = respx.mock(assert_all_called=False) -# 1. Request for status: created. Response has three pages. +# 1. CSV results +res_api_mock.route( + M(url__startswith=TEST_URL), M(params__contains={'format': 'csv'})).mock( + side_effect=[Response(200, text="id,status\n1234-abcd,SUCCESS\n")]) + +# 2. Request for status: created. Response has three pages. res_api_mock.route( M(url__startswith=TEST_URL), M(params__contains={'status': 'created'})).mock(side_effect=[ @@ -136,12 +141,12 @@ res_api_mock.route( for page in result_pages(status={'created'}, size=40) ]) -# 2. Request for status: queued. Response has a single empty page. +# 3. Request for status: queued. Response has a single empty page. res_api_mock.route(M(url__startswith=TEST_URL), M(params__contains={'status': 'queued'})).mock( side_effect=[Response(200, json={'results': []})]) -# 3. No status requested. Response is the same as for 1. +# 4. No status requested. Response is the same as for 1. res_api_mock.route(M(url__startswith=TEST_URL)).mock( side_effect=[Response(200, json=page) for page in result_pages(size=40)]) @@ -276,6 +281,18 @@ async def test_get_results_success(): assert len(results) == 100 [email protected] +@res_api_mock +async def test_get_results_csv(): + """Subscription CSV fetched, has the expected items.""" + async with Session() as session: + client = SubscriptionsClient(session, base_url=TEST_URL) + results = [res async for res in client.get_results_csv("42")] + import csv + rows = list(csv.reader(results)) + assert rows == [['id', 'status'], ['1234-abcd', 'SUCCESS']] + + paging_cycle_api_mock = respx.mock() # Identical next links is a hangup we want to avoid. diff --git a/tests/integration/test_subscriptions_cli.py b/tests/integration/test_subscriptions_cli.py index 3f9feef..5fe4b42 100644 --- a/tests/integration/test_subscriptions_cli.py +++ b/tests/integration/test_subscriptions_cli.py @@ -306,3 +306,11 @@ def test_request_catalog_success(invoke, geom_geojson): ]) assert json.loads(result.output) == source assert result.exit_code == 0 # success. + + +@res_api_mock +def test_subscriptions_results_csv(invoke): + """Get results as CSV.""" + result = invoke(['results', 'test', '--csv']) + assert result.exit_code == 0 # success. + assert result.output.splitlines() == ['id,status', '1234-abcd,SUCCESS'] diff --git a/tests/unit/test_subscription_request.py b/tests/unit/test_subscription_request.py index b22983b..69c5bae 100644 --- a/tests/unit/test_subscription_request.py +++ b/tests/unit/test_subscription_request.py @@ -346,3 +346,23 @@ def test_toar_tool_success(): expected = {"type": "toar", "parameters": {"scale_factor": 12345}} assert res == expected + + +def test_pv_source_success(geom_geojson): + """Configure a planetary variable subscription source.""" + # NOTE: this function does not yet validate type and id. + # The nonsense values are intended to fail when the function does + # add validation. 
+ source = subscription_request.planetary_variable_source( + "var1", + "VAR1-abcd", + geometry=geom_geojson, + start_time=datetime(2021, 3, 1), + end_time=datetime(2021, 3, 2), + ) + + assert source["type"] == "var1" + params = source["parameters"] + assert params["id"] == "VAR1-abcd" + assert params["geometry"] == geom_geojson + assert params["start_time"].startswith("2021-03-01")
Support for upcoming CSV timeseries subscription results

**Is your feature request related to a problem? Please describe.**

With the introduction of PVs to the subscription API, timeseries results are also supported in CSV format. This is available on api.planet.com/subscriptions/v1 but not yet documented officially.

**Describe the solution you'd like**

I would like to use the `format=csv` option of the subscription API. Example request:

```
https://api.planet.com/subscriptions/v1/<uuid>/results?format=csv
```

**Additional context**

[Internal document](https://docs.google.com/document/d/1dj8qPeHy4a7vR6Mu2HVuO2bfPaDU_m4y9ExBC_RFpXY/edit#heading=h.pc8fh9xlg1ef) explaining the endpoint.
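The patch above adds a `get_results_csv` client method for this. A minimal usage sketch; the subscription id mirrors the one in the new test, and a configured API key is assumed.

```python
import asyncio

from planet import Session
from planet.clients.subscriptions import SubscriptionsClient


async def main():
    async with Session() as session:
        client = SubscriptionsClient(session)
        # Yields the CSV header row first, then one row per result.
        async for row in client.get_results_csv("42", status=["success"]):
            print(row)


asyncio.run(main())
```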
0.0
79d9a3cb952fcd4f75e5e935ee067455580c779d
[ "tests/integration/test_subscriptions_api.py::test_get_results_csv", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_csv", "tests/unit/test_subscription_request.py::test_pv_source_success" ]
[ "tests/integration/test_subscriptions_api.py::test_list_subscriptions_failure", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_success[status0-100]", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_success[status1-0]", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_success[None-100]", "tests/integration/test_subscriptions_api.py::test_create_subscription_failure", "tests/integration/test_subscriptions_api.py::test_create_subscription_success", "tests/integration/test_subscriptions_api.py::test_cancel_subscription_failure", "tests/integration/test_subscriptions_api.py::test_cancel_subscription_success", "tests/integration/test_subscriptions_api.py::test_update_subscription_failure", "tests/integration/test_subscriptions_api.py::test_update_subscription_success", "tests/integration/test_subscriptions_api.py::test_get_subscription_failure", "tests/integration/test_subscriptions_api.py::test_get_subscription_success", "tests/integration/test_subscriptions_api.py::test_get_results_failure", "tests/integration/test_subscriptions_api.py::test_get_results_success", "tests/integration/test_subscriptions_api.py::test_list_subscriptions_cycle_break", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options4-0]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[--{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[--{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_get_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_get_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options4-0]", "tests/integration/test_subscriptions_cli.py::test_request_base_success", "tests/integration/test_subscriptions_cli.py::test_request_catalog_success", "tests/unit/test_subscription_request.py::test_build_request_success", "tests/unit/test_subscription_request.py::test_build_request_clip_to_source_success", 
"tests/unit/test_subscription_request.py::test_build_request_clip_to_source_failure", "tests/unit/test_subscription_request.py::test_catalog_source_success", "tests/unit/test_subscription_request.py::test_catalog_source_featurecollection", "tests/unit/test_subscription_request.py::test_catalog_source_invalid_start_time", "tests/unit/test_subscription_request.py::test_amazon_s3_success", "tests/unit/test_subscription_request.py::test_azure_blob_storage_success", "tests/unit/test_subscription_request.py::test_google_cloud_storage_success", "tests/unit/test_subscription_request.py::test_oracle_cloud_storage_success", "tests/unit/test_subscription_request.py::test_notifications_success", "tests/unit/test_subscription_request.py::test_notifications_invalid_topics", "tests/unit/test_subscription_request.py::test_band_math_tool_success", "tests/unit/test_subscription_request.py::test_band_math_tool_invalid_pixel_type", "tests/unit/test_subscription_request.py::test_clip_tool_success", "tests/unit/test_subscription_request.py::test_clip_tool_invalid_type", "tests/unit/test_subscription_request.py::test_file_format_tool_success", "tests/unit/test_subscription_request.py::test_file_format_tool_invalid_format", "tests/unit/test_subscription_request.py::test_harmonize_tool_success", "tests/unit/test_subscription_request.py::test_harmonize_tool_invalid_target_sensor", "tests/unit/test_subscription_request.py::test_reproject_tool_success", "tests/unit/test_subscription_request.py::test_reproject_tool_invalid_kernel", "tests/unit/test_subscription_request.py::test_toar_tool_success" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-11 00:35:41+00:00
apache-2.0
4,607
planetlabs__planet-client-python-988
diff --git a/CHANGES.txt b/CHANGES.txt index 9163bfe..18b7033 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -3,6 +3,9 @@ 2.1b1 (2023-07-11) Added: +- The request command of the subscriptions CLI has a new --clip-to-source + option (#988). +- A new request-pv command has been added to the subscriptions CLi (#988). - Support for catalog source publishing stages (#977) and time range types (#978) have been added to subscription_request.catalog_source. - Add the option to get Planetary Variable subscription results as a CSV file diff --git a/planet/cli/subscriptions.py b/planet/cli/subscriptions.py index 62860c0..f5551e8 100644 --- a/planet/cli/subscriptions.py +++ b/planet/cli/subscriptions.py @@ -238,14 +238,32 @@ async def list_subscription_results_cmd(ctx, '--tools', type=types.JSON(), help='Toolchain JSON. Can be a string, filename, or - for stdin.') [email protected]( + '--clip-to-source', + is_flag=True, + default=False, + help="Clip to the source geometry without specifying a clip tool.") @pretty -def request(name, source, delivery, notifications, tools, pretty): - """Generate a subscriptions request.""" +def request(name, + source, + delivery, + notifications, + tools, + clip_to_source, + pretty): + """Generate a subscriptions request. + + Note: the next version of the Subscription API will remove the clip + tool option and always clip to the source geometry. Thus the + --clip-to-source option is a preview of the next API version's + default behavior. + """ res = subscription_request.build_request(name, source, delivery, notifications=notifications, - tools=tools) + tools=tools, + clip_to_source=clip_to_source) echo_json(res, pretty) @@ -298,3 +316,49 @@ def request_catalog(item_types, rrule=rrule, filter=filter) echo_json(res, pretty) + + [email protected]() # type: ignore +@translate_exceptions [email protected]( + '--var-type', + required=True, + help='Planetary variable type.', + type=click.Choice([ + "biomass_proxy", + "land_surface_temperature", + "soil_water_content", + "vegetation_optical_depth" + ]), +) [email protected]('--var-id', required=True, help='Planetary variable id.') [email protected]( + '--geometry', + required=True, + type=types.JSON(), + help="""Geometry of the area of interest of the subscription that will be + used to determine matches. Can be a string, filename, or - for stdin.""") [email protected]('--start-time', + required=True, + type=types.DateTime(), + help='Date and time to begin subscription.') [email protected]('--end-time', + type=types.DateTime(), + help='Date and time to end subscription.') +@pretty +def request_pv(var_type, var_id, geometry, start_time, end_time, pretty): + """Generate a Planetary Variable subscription source. + + Planetary Variables come in 4 types and are further subdivided + within these types. See [Subscribing to Planetary + Variables](https://developers.planet.com/docs/subscriptions/pvs-subs/#planetary-variables-types-and-ids) + for details. 
+ """ + res = subscription_request.planetary_variable_source( + var_type, + var_id, + geometry, + start_time, + end_time=end_time, + ) + echo_json(res, pretty) diff --git a/planet/subscription_request.py b/planet/subscription_request.py index 2118a09..ca6bff2 100644 --- a/planet/subscription_request.py +++ b/planet/subscription_request.py @@ -118,8 +118,8 @@ def build_request(name: str, if notifications: details['notifications'] = dict(notifications) - if tools: - tool_list = [dict(tool) for tool in tools] + if tools or clip_to_source: + tool_list = [dict(tool) for tool in (tools or [])] # If clip_to_source is True a clip configuration will be added # to the list of requested tools unless an existing clip tool
planetlabs/planet-client-python
33f69037fd1cba672dd7622c1d9f3af30e81b54f
diff --git a/tests/integration/test_subscriptions_cli.py b/tests/integration/test_subscriptions_cli.py index 5fe4b42..a1b3cea 100644 --- a/tests/integration/test_subscriptions_cli.py +++ b/tests/integration/test_subscriptions_cli.py @@ -252,7 +252,7 @@ def test_subscriptions_results_success(invoke, options, expected_count): def test_request_base_success(invoke, geom_geojson): - """Request command succeeds""" + """Request command succeeds.""" source = json.dumps({ "type": "catalog", "parameters": { @@ -285,6 +285,32 @@ def test_request_base_success(invoke, geom_geojson): assert result.exit_code == 0 # success. +def test_request_base_clip_to_source(invoke, geom_geojson): + """Clip to source using command line option.""" + source = json.dumps({ + "type": "catalog", + "parameters": { + "geometry": geom_geojson, + "start_time": "2021-03-01T00:00:00Z", + "item_types": ["PSScene"], + "asset_types": ["ortho_analytic_4b"] + } + }) + result = invoke([ + 'request', + '--name=test', + f'--source={source}', + '--delivery={"type": "fake"}', + '--clip-to-source' + ]) + + assert result.exit_code == 0 # success. + req = json.loads(result.output) + tool = req["tools"][0] + assert tool["type"] == "clip" + assert tool["parameters"]["aoi"] == geom_geojson + + def test_request_catalog_success(invoke, geom_geojson): """Request-catalog command succeeds""" source = { @@ -314,3 +340,19 @@ def test_subscriptions_results_csv(invoke): result = invoke(['results', 'test', '--csv']) assert result.exit_code == 0 # success. assert result.output.splitlines() == ['id,status', '1234-abcd,SUCCESS'] + + +def test_request_pv_success(invoke, geom_geojson): + """Request-pv command succeeds""" + result = invoke([ + 'request-pv', + '--var-type=biomass_proxy', + '--var-id=BIOMASS-PROXY_V3.0_10', + f"--geometry={json.dumps(geom_geojson)}", + '--start-time=2021-03-01T00:00:00' + ]) + + assert result.exit_code == 0 # success. + source = json.loads(result.output) + assert source["type"] == "biomass_proxy" + assert source["parameters"]["id"] == "BIOMASS-PROXY_V3.0_10"
Add a "planet subscriptions request-pv" command Like `planet subscriptions catalog-request` https://planet-sdk-for-python-v2.readthedocs.io/en/latest/cli/cli-subscriptions/#catalog-request
0.0
33f69037fd1cba672dd7622c1d9f3af30e81b54f
[ "tests/integration/test_subscriptions_cli.py::test_request_base_clip_to_source", "tests/integration/test_subscriptions_cli.py::test_request_pv_success" ]
[ "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options4-0]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[--{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[--{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_get_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_get_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options4-0]", "tests/integration/test_subscriptions_cli.py::test_request_base_success", "tests/integration/test_subscriptions_cli.py::test_request_catalog_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_csv" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-12 03:15:12+00:00
apache-2.0
4608
planetlabs__planet-client-python-990
diff --git a/CHANGES.txt b/CHANGES.txt index 18b7033..a78e118 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,5 +1,11 @@ 2.1.0 (TBD) +- The --cloudconfig option of the request command of the orders CLI has been + superseded by a new --delivery option, with --cloudconfig left as an alias. + New --archive-type, --archive-filename, and --single-archive options to + control the zip archiving of order outputs without any cloud storage delivery + have also been added (#990). + 2.1b1 (2023-07-11) Added: diff --git a/planet/cli/orders.py b/planet/cli/orders.py index 40ce1ba..6a6b511 100644 --- a/planet/cli/orders.py +++ b/planet/cli/orders.py @@ -253,11 +253,27 @@ async def create(ctx, request: str, pretty): default=False, is_flag=True, help='Send email notification when order is complete.') [email protected]('--archive-type', + type=click.Choice(['zip']), + help="Optionally zip archive each item bundle.") [email protected]('--archive-filename', + default='{{name}}-{{order_id}}.zip', + show_default=True, + help="Templated filename for archived bundles or orders.") [email protected]('--single-archive', + is_flag=True, + default=False, + show_default=True, + help="Optionally zip archive all item bundles together.") @click.option( + '--delivery', '--cloudconfig', type=types.JSON(), - help="""Credentials for cloud storage provider to enable cloud delivery of - data. Can be a json string, filename, or '-' for stdin.""") + help=("Delivery configuration, which may include credentials for a cloud " + "storage provider, to enable cloud delivery of data, and/or " + "parameters for bundling deliveries as zip archives. Can be a JSON " + "string, a filename, or '-' for stdin. The --cloudconfig option is " + "an alias for this use case.")) @click.option( '--stac/--no-stac', default=True, @@ -274,7 +290,10 @@ async def request(ctx, clip, tools, email, - cloudconfig, + archive_type, + archive_filename, + single_archive, + delivery, stac, pretty): """Generate an order request. @@ -303,14 +322,12 @@ async def request(ctx, except planet.exceptions.ClientError as e: raise click.BadParameter(e) - if cloudconfig: - delivery = planet.order_request.delivery(cloud_config=cloudconfig) - if "google_earth_engine" in cloudconfig: - stac = False - else: - delivery = None + delivery = planet.order_request.delivery(archive_type=archive_type, + archive_filename=archive_filename, + single_archive=single_archive, + cloud_config=delivery) - if stac: + if stac and "google_earth_engine" not in delivery: stac_json = {'stac': {}} else: stac_json = {} diff --git a/planet/cli/subscriptions.py b/planet/cli/subscriptions.py index f5551e8..eebc1a8 100644 --- a/planet/cli/subscriptions.py +++ b/planet/cli/subscriptions.py @@ -226,10 +226,13 @@ async def list_subscription_results_cmd(ctx, required=True, type=types.JSON(), help='Source JSON. Can be a string, filename, or - for stdin.') [email protected]('--delivery', - required=True, - type=types.JSON(), - help='Delivery JSON. Can be a string, filename, or - for stdin.') [email protected]( + '--delivery', + required=True, + type=types.JSON(), + help=("Delivery configuration, including credentials for a cloud " + "storage provider, to enable cloud delivery of data. Can be a " + "JSON string, a filename, or '-' for stdin. 
")) @click.option( '--notifications', type=types.JSON(), diff --git a/planet/order_request.py b/planet/order_request.py index f36644c..0d4b603 100644 --- a/planet/order_request.py +++ b/planet/order_request.py @@ -15,7 +15,7 @@ """Functionality for preparing order details for use in creating an order""" from __future__ import annotations # https://stackoverflow.com/a/33533514 import logging -from typing import Optional, Any, Dict, List, Union +from typing import Any, Dict, List, Mapping, Optional, Union from . import geojson, specs from .exceptions import ClientError @@ -163,9 +163,9 @@ def notifications(email: Optional[bool] = None, def delivery(archive_type: Optional[str] = None, - single_archive: bool = False, + single_archive: Optional[bool] = False, archive_filename: Optional[str] = None, - cloud_config: Optional[dict] = None) -> dict: + cloud_config: Optional[Mapping] = None) -> dict: """Order delivery configuration. Example: @@ -196,20 +196,21 @@ def delivery(archive_type: Optional[str] = None, Raises: planet.specs.SpecificationException: If archive_type is not valid. """ + config: Dict[str, Any] = {} + if archive_type: archive_type = specs.validate_archive_type(archive_type) - # for missing archive file name if archive_filename is None: archive_filename = "{{name}}_{{order_id}}.zip" - fields = ['archive_type', 'single_archive', 'archive_filename'] - values = [archive_type, single_archive, archive_filename] - - config = dict((k, v) for k, v in zip(fields, values) if v) + config.update(archive_type=archive_type, + archive_filename=archive_filename, + single_archive=single_archive) if cloud_config: config.update(cloud_config) + return config
planetlabs/planet-client-python
0aaf6c58a484de72907381569b76d0c48bfa5627
diff --git a/tests/unit/test_order_request.py b/tests/unit/test_order_request.py index e702cce..46a01c4 100644 --- a/tests/unit/test_order_request.py +++ b/tests/unit/test_order_request.py @@ -175,6 +175,7 @@ def test_delivery_missing_archive_details(): expected = { 'archive_type': 'zip', 'archive_filename': "{{name}}_{{order_id}}.zip", + 'single_archive': False, 'amazon_s3': { 'aws_access_key_id': 'aws_access_key_id', 'aws_secret_access_key': 'aws_secret_access_key', @@ -317,3 +318,14 @@ def test_band_math_tool_invalid_pixel_type(): order_request.band_math_tool(b1='b1', b2='arctan(b1)', pixel_type="invalid") + + +def test_no_archive_items_without_type(): + """Without an archive type no filename or single option are passed.""" + delivery_config = order_request.delivery( + None, True, TEST_ARCHIVE_FILENAME, cloud_config={"bogus_storage": {}}) + + assert "bogus_storage" in delivery_config + assert "archive_type" not in delivery_config + assert "archive_filename" not in delivery_config + assert "single_archive" not in delivery_config
The --cloudconfig CLI option has an inaccurate name

The `--cloudconfig` option of the `planet orders create` CLI command has an inaccurate name, as it is not limited to cloud delivery configurations. It is entirely possible to provide a local delivery config using the `--cloudconfig` option, e.g.:
```
{
    "archive_type": "zip",
    "single_archive": true,
    "archive_filename": "{{name}}_{{order_id}}.zip"
}
```
The option should be named `--delivery` or `--deliveryconfig`, and [the docs](https://planet-sdk-for-python-v2.readthedocs.io/en/latest/cli/cli-orders/#cloud-delivery) updated accordingly. The [equivalent SDK method](https://planet-sdk-for-python-v2.readthedocs.io/en/latest/python/sdk-reference/#planet.order_request.delivery) is more accurately named `delivery`.
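To make the requested change concrete, a small sketch of how the reworked `order_request.delivery` helper behaves after the patch and unit tests above (the bucket name is a placeholder):

```python
from planet import order_request

# Archive-only ("local") delivery: no cloud credentials involved.
local = order_request.delivery(archive_type="zip", single_archive=True)
# -> {'archive_type': 'zip',
#     'archive_filename': '{{name}}_{{order_id}}.zip',
#     'single_archive': True}

# Cloud-only delivery: without an archive_type, no archive keys are
# emitted, matching test_no_archive_items_without_type above.
cloud = order_request.delivery(
    cloud_config={"amazon_s3": {"bucket": "placeholder-bucket"}})
assert "amazon_s3" in cloud and "archive_type" not in cloud
```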
0.0
0aaf6c58a484de72907381569b76d0c48bfa5627
[ "tests/unit/test_order_request.py::test_delivery_missing_archive_details", "tests/unit/test_order_request.py::test_no_archive_items_without_type" ]
[ "tests/unit/test_order_request.py::test_build_request", "tests/unit/test_order_request.py::test_product", "tests/unit/test_order_request.py::test_notifications", "tests/unit/test_order_request.py::test_delivery", "tests/unit/test_order_request.py::test_amazon_s3", "tests/unit/test_order_request.py::test_azure_blob_storage", "tests/unit/test_order_request.py::test_google_cloud_storage", "tests/unit/test_order_request.py::test_google_earth_engine", "tests/unit/test_order_request.py::test__tool", "tests/unit/test_order_request.py::test_clip_tool_polygon", "tests/unit/test_order_request.py::test_clip_tool_multipolygon", "tests/unit/test_order_request.py::test_clip_tool_invalid", "tests/unit/test_order_request.py::test_reproject_tool", "tests/unit/test_order_request.py::test_tile_tool", "tests/unit/test_order_request.py::test_toar_tool", "tests/unit/test_order_request.py::test_harmonization_tool_success", "tests/unit/test_order_request.py::test_harmonization_tool_invalid_target_sensor", "tests/unit/test_order_request.py::test_band_math_tool_success", "tests/unit/test_order_request.py::test_band_math_tool_invalid_pixel_type" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-13 23:02:38+00:00
apache-2.0
4609
planetlabs__planet-client-python-991
diff --git a/CHANGES.txt b/CHANGES.txt index a78e118..fc85a88 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,5 +1,7 @@ 2.1.0 (TBD) +- Support for publishing stages and time range types has been added to the + subscriptions CLI (#992). - The --cloudconfig option of the request command of the orders CLI has been superseded by a new --delivery option, with --cloudconfig left as an alias. New --archive-type, --archive-filename, and --single-archive options to diff --git a/docs/cli/cli-orders.md b/docs/cli/cli-orders.md index ff0dd76..48d9d27 100644 --- a/docs/cli/cli-orders.md +++ b/docs/cli/cli-orders.md @@ -149,6 +149,33 @@ planet orders request \ } ``` +#### Zip archives of bundles and orders + +You can request that all files of a bundle be zipped together by using the `--archive-type` option. The only type of archive currently available is "zip". + +```sh +planet orders request \ + --item-type PSScene \ + --bundle analytic_sr_udm2 \ + --name 'My First Zipped Order' \ + --archive-type zip \ + 20220605_124027_64_242b +``` + +You can request that all files of the entire order be zipped together by using the `--single-archive` option. + +```sh +planet orders request \ + --item-type PSScene \ + --bundle analytic_sr_udm2 \ + --name 'My First Zipped Order' \ + --archive-type zip \ + --single-archive \ + 20220605_124027_64_242b,20220605_124025_34_242b +``` + +*New in version 2.1* + ### Save an Order Request The above command just prints out the necessary JSON to create an order. To actually use it you can @@ -613,13 +640,13 @@ Orders with Google Earth Engine delivery will force the STAC flag to false. ### Cloud Delivery Another option is to delivery your orders directly to a cloud bucket, like AWS S3 or Google Cloud Storage. -The file given with the `--cloudconfig` option should contain JSON that follows +The file given with the `--delivery` option should contain JSON that follows the options and format given in [Delivery to Cloud Storage](https://developers.planet.com/docs/orders/delivery/#delivery-to-cloud-storage). An example would be: -Example: `cloudconfig.json` +Example: `delivery.json` ```json { @@ -633,6 +660,12 @@ Example: `cloudconfig.json` } ``` +*New in 2.1* + +!!! note + + `--cloudconfig` was the name of this option in version 2.0 and can continue to be used as an alias for `--delivery`. + ### Using Orders output as input One useful thing to note is that the order JSON that reports status and location is a valid Orders API request. diff --git a/docs/cli/cli-subscriptions.md b/docs/cli/cli-subscriptions.md index 1e48fb1..006aa5f 100644 --- a/docs/cli/cli-subscriptions.md +++ b/docs/cli/cli-subscriptions.md @@ -157,21 +157,33 @@ planet subscriptions get cb817760-1f07-4ee7-bba6-bcac5346343f To see what items have been delivered to your cloud bucket you can use the `results` command: ```sh -planet subscriptions results cb817760-1f07-4ee7-bba6-bcac5346343f +planet subscriptions results SUBSCRIPTION_ID ``` +`SUBSCRIPTION_ID` above is a placeholder for a unique subscription identifier, which will be a UUID like `cb817760-1f07-4ee7-bba6-bcac5346343f`. + By default this displays the first 100 results. As with other commands, you can use the `--limit` param to set a higher limit, or set it to 0 to see all results (this can be quite large with subscriptions results). 
You can also filter by status: ```sh -planet subscriptions results --status processing +planet subscriptions results SUBSCRIPTION_ID --status processing ``` The available statuses are `created`, `queued`, `processing`, `failed`, and `success`. Note it’s quite useful to use `jq` to help filter out results as well. +#### Results as comma-seperated values (CSV) + +Planetary Variable subscribers can benefit from retrieving results as a CSV. The results contain variable statistics and can serve as data for time series analysis and visualization. + +```sh +planet subscriptions results SUBSCRIPTION_ID --csv +``` + +*New in version 2.1* + ### Update Subscription You can update a subscription that is running, for example to change the 'tools' it’s using or to alter @@ -306,6 +318,28 @@ planet data filter --range clear_percent gt 90 \ Do not bother with geometry or date filters, as they will be ignored in favor of the `--start-time` and `--geometry` values that are required. +#### Publishing stages and time range types + +By using the `--time-range-type` you can choose to temporally filter by +acquisition or publication time. The `--publishing-stage` option allows you to +receive the earliest preview imagery or wait until finalized imagery is +available. See [Catalog Source +Types:Parameters](https://developers.planet.com/docs/subscriptions/source/#parameters) +for more details. + +```sh +planet subscriptions request-catalog \ + --item-types PSScene \ + --asset-types ortho_analytic_8b \ + --geometry geometry.geojson \ + --start-time 2022-08-24T00:00:00-07:00 \ + --time-range-type acquired \ + --publishing-stage finalized \ + --filter filter.json +``` + +*New in version 2.1* + #### Saving the output You’ll likely want to save the output of your `request-catalog` call to disk, so that you can more easily use it in constructing the complete subscription @@ -321,6 +355,25 @@ planet subscriptions request-catalog \ --filter filter.json > request-catalog.json ``` +### Planetary Variable Request + +Subscribing to Planetary Variables is much like subscribing to imagery from +Planet's catalog. The `planet subscriptions request-pv` command can construct the source +part of a Planetary Variable request like `request-catalog` does for cataloged +imagery. Planetary Variable subscriptions come in 4 types and are further +subdivided within these types by an identifier. See [Subscribing to Planetary +Variables](https://developers.planet.com/docs/subscriptions/pvs-subs/#planetary-variables-types-and-ids) +for details. To constrain data delivery by space and time, you will use the +`--geometry`, `start-time`, and `end-time` options described above. + +```sh +planet subscriptions request-pv \ + --var-type biomass_proxy \ + --var-id BIOMASS-PROXY_V3.0_10 \ + --geometry geometry.geojson \ + --start-time 2022-08-24T00:00:00-07:00 > request-pv.json +``` + ### Subscription Tools Now we’ll dive into some of the tools options for subscriptions. These are quite similar to the tools for @@ -332,51 +385,18 @@ for Orders, future of the versions of the CLI will likely add `tools` convenienc The most used tool is the `clip` operation, which lets you pass a geometry to the Subscriptions API and it creates new images that only have pixels within the geometry you -gave it. 
We’ll use the same geometry from [above](#geometry), as it is quite -typical to use the same subscription geometry as the clip geometry, so you don't get -any pixels outside of your area of interest (99.9% of all subscriptions use the clip -tool, so it’s strongly recommended to also use clip). The proper 'clip' tool for it -would be: +gave it. 99% of the time you will want to clip to the subscription geometry. The easiest way to do this is to use the `--clip-to-source` option added to the `subscriptions request` command in version 2.1. -```json -[ - { - "type": "clip", - "parameters": { - "aoi": { - "type": "Polygon", - "coordinates": [ - [ - [ - -163.828125, - -44.59046718130883 - ], - [ - 181.7578125, - -44.59046718130883 - ], - [ - 181.7578125, - 78.42019327591201 - ], - [ - -163.828125, - 78.42019327591201 - ], - [ - -163.828125, - -44.59046718130883 - ] - ] - ] - } - } - } -] +```sh +planet subscriptions request \ + --name 'Clipped Subscription' \ + --source request-catalog.json \ + --delivery cloud-delivery.json \ + --clip-to-source ``` -You can save this tools as `tools.json` to include in the `subscriptions request` -command. +*New in version 2.1* + #### Additional Tools diff --git a/planet/cli/subscriptions.py b/planet/cli/subscriptions.py index eebc1a8..f3f1b37 100644 --- a/planet/cli/subscriptions.py +++ b/planet/cli/subscriptions.py @@ -301,6 +301,16 @@ def request(name, '--filter', type=types.JSON(), help='Search filter. Can be a string, filename, or - for stdin.') [email protected]( + '--publishing-stage', + 'publishing_stages', + type=click.Choice(["preview", "standard", "finalized"]), + multiple=True, + help=("Subscribe to results at a particular publishing stage. Multiple " + "instances of this option are allowed.")) [email protected]('--time-range-type', + type=click.Choice(["acquired", "published"]), + help="Subscribe by acquisition time or time of publication.") @pretty def request_catalog(item_types, asset_types, @@ -309,15 +319,20 @@ def request_catalog(item_types, end_time, rrule, filter, + publishing_stages, + time_range_type, pretty): """Generate a subscriptions request catalog source description.""" - res = subscription_request.catalog_source(item_types, - asset_types, - geometry, - start_time, - end_time=end_time, - rrule=rrule, - filter=filter) + res = subscription_request.catalog_source( + item_types, + asset_types, + geometry, + start_time, + end_time=end_time, + rrule=rrule, + filter=filter, + publishing_stages=publishing_stages, + time_range_type=time_range_type) echo_json(res, pretty)
planetlabs/planet-client-python
abb65a4fbf4c6eee3ca9731c7d4918efd82c7f7f
diff --git a/tests/integration/test_subscriptions_cli.py b/tests/integration/test_subscriptions_cli.py index a1b3cea..1e40eb9 100644 --- a/tests/integration/test_subscriptions_cli.py +++ b/tests/integration/test_subscriptions_cli.py @@ -12,6 +12,7 @@ There are 6 subscriptions commands: TODO: tests for 3 options of the planet-subscriptions-results command. """ +import itertools import json from click.testing import CliRunner @@ -356,3 +357,46 @@ def test_request_pv_success(invoke, geom_geojson): source = json.loads(result.output) assert source["type"] == "biomass_proxy" assert source["parameters"]["id"] == "BIOMASS-PROXY_V3.0_10" + + [email protected]( + # Test all the combinations of the three options plus some with dupes. + "publishing_stages", + list( + itertools.chain.from_iterable( + itertools.combinations(["preview", "standard", "finalized"], i) + for i in range(1, 4))) + [("preview", "preview"), + ("preview", "finalized", "preview")]) +def test_catalog_source_publishing_stages(invoke, + geom_geojson, + publishing_stages): + """Catalog source publishing stages are configured.""" + result = invoke([ + 'request-catalog', + '--item-types=PSScene', + '--asset-types=ortho_analytic_4b', + f"--geometry={json.dumps(geom_geojson)}", + '--start-time=2021-03-01T00:00:00', + ] + [f'--publishing-stage={stage}' for stage in publishing_stages]) + + assert result.exit_code == 0 # success. + req = json.loads(result.output) + assert req['parameters']['publishing_stages'] == list( + set(publishing_stages)) + + [email protected]("time_range_type", ["acquired", "published"]) +def test_catalog_source_time_range_type(invoke, geom_geojson, time_range_type): + """Catalog source time range type is configured.""" + result = invoke([ + 'request-catalog', + '--item-types=PSScene', + '--asset-types=ortho_analytic_4b', + f"--geometry={json.dumps(geom_geojson)}", + '--start-time=2021-03-01T00:00:00', + f'--time-range-type={time_range_type}', + ]) + + assert result.exit_code == 0 # success. + req = json.loads(result.output) + assert req['parameters']['time_range_type'] == time_range_type
Recommend clip-to-source in subscription docs

For example, this section can be mostly eliminated: https://planet-sdk-for-python-v2.readthedocs.io/en/latest/cli/cli-subscriptions/#clipping
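For reference, the behavior these docs would now recommend, sketched from the `--clip-to-source`/`build_request` change shown in the request-pv patch earlier in this section (#988); the source and delivery payloads are placeholders modeled on the CLI test:

```python
from planet import subscription_request

geom = {"type": "Polygon",
        "coordinates": [[[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.0, 0.0]]]}
source = {"type": "catalog",
          "parameters": {"geometry": geom,
                         "start_time": "2021-03-01T00:00:00Z",
                         "item_types": ["PSScene"],
                         "asset_types": ["ortho_analytic_4b"]}}
delivery = {"type": "fake"}  # placeholder, as in test_request_base_clip_to_source

req = subscription_request.build_request(
    "test", source, delivery, clip_to_source=True)
# A clip tool aimed at the source geometry is appended automatically:
# req["tools"][0] == {"type": "clip", "parameters": {"aoi": geom}}
```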
0.0
abb65a4fbf4c6eee3ca9731c7d4918efd82c7f7f
[ "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages0]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages1]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages2]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages3]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages4]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages5]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages6]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages7]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_publishing_stages[publishing_stages8]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_time_range_type[acquired]", "tests/integration/test_subscriptions_cli.py::test_catalog_source_time_range_type[published]" ]
[ "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_list_options[options4-0]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[--{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_create_success[{\"name\":", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[--{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_bad_request[{0:", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_cancel_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_update_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_get_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_get_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_failure", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options0-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options1-100]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options2-1]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options3-2]", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_success[options4-0]", "tests/integration/test_subscriptions_cli.py::test_request_base_success", "tests/integration/test_subscriptions_cli.py::test_request_base_clip_to_source", "tests/integration/test_subscriptions_cli.py::test_request_catalog_success", "tests/integration/test_subscriptions_cli.py::test_subscriptions_results_csv", "tests/integration/test_subscriptions_cli.py::test_request_pv_success" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2023-07-17 17:40:28+00:00
apache-2.0
4610
platformio__platformio-core-4852
diff --git a/HISTORY.rst b/HISTORY.rst index bfe1dcba..4f1cc2d3 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -22,6 +22,7 @@ test-driven methodologies, and modern toolchains for unrivaled success. * Broadened version support for the ``pyelftools`` dependency, enabling compatibility with lower versions and facilitating integration with a wider range of third-party tools (`issue #4834 <https://github.com/platformio/platformio-core/issues/4834>`_) * Resolved an issue related to the relative package path in the `pio pkg publish <https://docs.platformio.org/en/latest/core/userguide/pkg/cmd_publish.html>`__ command +* Addressed an issue where passing a relative path (``--project-dir``) to the `pio project init <https://docs.platformio.org/en/latest/core/userguide/project/cmd_init.html>`__ command resulted in an error (`issue #4847 <https://github.com/platformio/platformio-core/issues/4847>`_) 6.1.13 (2024-01-12) ~~~~~~~~~~~~~~~~~~~ diff --git a/platformio/assets/system/99-platformio-udev.rules b/platformio/assets/system/99-platformio-udev.rules index 992676db..bc31ac74 100644 --- a/platformio/assets/system/99-platformio-udev.rules +++ b/platformio/assets/system/99-platformio-udev.rules @@ -36,6 +36,8 @@ ATTRS{idVendor}=="067b", ATTRS{idProduct}=="2303", MODE:="0666", ENV{ID_MM_DEVIC # QinHeng Electronics HL-340 USB-Serial adapter ATTRS{idVendor}=="1a86", ATTRS{idProduct}=="7523", MODE:="0666", ENV{ID_MM_DEVICE_IGNORE}="1", ENV{ID_MM_PORT_IGNORE}="1" +# QinHeng Electronics CH343 USB-Serial adapter +ATTRS{idVendor}=="1a86", ATTRS{idProduct}=="55d3", MODE:="0666", ENV{ID_MM_DEVICE_IGNORE}="1", ENV{ID_MM_PORT_IGNORE}="1" # QinHeng Electronics CH9102 USB-Serial adapter ATTRS{idVendor}=="1a86", ATTRS{idProduct}=="55d4", MODE:="0666", ENV{ID_MM_DEVICE_IGNORE}="1", ENV{ID_MM_PORT_IGNORE}="1" @@ -173,4 +175,4 @@ ATTRS{product}=="*CMSIS-DAP*", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1", ENV{ID ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="2107", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1", ENV{ID_MM_PORT_IGNORE}="1" # Espressif USB JTAG/serial debug unit -ATTRS{idVendor}=="303a", ATTR{idProduct}=="1001", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1", ENV{ID_MM_PORT_IGNORE}="1" \ No newline at end of file +ATTRS{idVendor}=="303a", ATTR{idProduct}=="1001", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1", ENV{ID_MM_PORT_IGNORE}="1" diff --git a/platformio/pipdeps.py b/platformio/pipdeps.py index f41218ea..e6193413 100644 --- a/platformio/pipdeps.py +++ b/platformio/pipdeps.py @@ -35,7 +35,7 @@ def get_pip_dependencies(): home = [ # PIO Home requirements "ajsonrpc == 1.2.*", - "starlette >=0.19, <0.36", + "starlette >=0.19, <0.38", "uvicorn %s" % ("== 0.16.0" if PY36 else ">=0.16, <0.28"), "wsproto == 1.*", ] diff --git a/platformio/project/commands/init.py b/platformio/project/commands/init.py index 4dae6e58..bc3ac61a 100644 --- a/platformio/project/commands/init.py +++ b/platformio/project/commands/init.py @@ -79,6 +79,7 @@ def project_init_cmd( env_prefix, silent, ): + project_dir = os.path.abspath(project_dir) is_new_project = not is_platformio_project(project_dir) if is_new_project: if not silent:
platformio/platformio-core
255e91b51c7076c322a748c746a5fa63ceae6ae8
diff --git a/tests/commands/test_init.py b/tests/commands/test_init.py index 651cf579..a8bffc5d 100644 --- a/tests/commands/test_init.py +++ b/tests/commands/test_init.py @@ -15,6 +15,7 @@ import json import os +from platformio import fs from platformio.commands.boards import cli as cmd_boards from platformio.project.commands.init import project_init_cmd from platformio.project.config import ProjectConfig @@ -36,27 +37,28 @@ def test_init_default(clirunner, validate_cliresult): validate_pioproject(os.getcwd()) -def test_init_ext_folder(clirunner, validate_cliresult): - with clirunner.isolated_filesystem(): - ext_folder_name = "ext_folder" - os.makedirs(ext_folder_name) - result = clirunner.invoke(project_init_cmd, ["-d", ext_folder_name]) - validate_cliresult(result) - validate_pioproject(os.path.join(os.getcwd(), ext_folder_name)) - - def test_init_duplicated_boards(clirunner, validate_cliresult, tmpdir): - with tmpdir.as_cwd(): - for _ in range(2): - result = clirunner.invoke( - project_init_cmd, - ["-b", "uno", "-b", "uno", "--no-install-dependencies"], - ) - validate_cliresult(result) - validate_pioproject(str(tmpdir)) - config = ProjectConfig(os.path.join(os.getcwd(), "platformio.ini")) - config.validate() - assert set(config.sections()) == set(["env:uno"]) + project_dir = str(tmpdir.join("ext_folder")) + os.makedirs(project_dir) + + with fs.cd(os.path.dirname(project_dir)): + result = clirunner.invoke( + project_init_cmd, + [ + "-d", + os.path.basename(project_dir), + "-b", + "uno", + "-b", + "uno", + "--no-install-dependencies", + ], + ) + validate_cliresult(result) + validate_pioproject(project_dir) + config = ProjectConfig(os.path.join(project_dir, "platformio.ini")) + config.validate() + assert set(config.sections()) == set(["env:uno"]) def test_init_ide_without_board(clirunner, tmpdir):
Add support for LuatOS ESP32C3-CORE Development Board

Add support for the LuatOS ESP32-C3 development board: https://wiki.luatos.org/chips/esp32c3/board.html

The board's USB-serial adapter enumerates as:

Bus 001 Device 012: ID 1a86:55d3 QinHeng Electronics USB Single Serial
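Not part of the patch itself, but as a way to cross-check the IDs from the `lsusb` line above, a small pyserial sketch (assuming pyserial is installed; PlatformIO's device listing is built on it):

```python
from serial.tools import list_ports

# The CH343 adapter reports vendor 0x1a86 and product 0x55d3,
# matching the new udev rule added in the patch above.
for port in list_ports.comports():
    if port.vid == 0x1A86 and port.pid == 0x55D3:
        print(f"CH343 serial adapter found at {port.device}")
```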
0.0
255e91b51c7076c322a748c746a5fa63ceae6ae8
[ "tests/commands/test_init.py::test_init_duplicated_boards" ]
[ "tests/commands/test_init.py::test_init_default", "tests/commands/test_init.py::test_init_ide_without_board", "tests/commands/test_init.py::test_init_ide_vscode", "tests/commands/test_init.py::test_init_ide_eclipse", "tests/commands/test_init.py::test_init_special_board", "tests/commands/test_init.py::test_init_enable_auto_uploading", "tests/commands/test_init.py::test_init_custom_framework", "tests/commands/test_init.py::test_init_incorrect_board" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2024-02-02 14:09:32+00:00
apache-2.0
4611
plotly__dash-351
diff --git a/CHANGELOG.md b/CHANGELOG.md index e96b0267..5e8d53aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.26.3 - 2018-08-27 +## Fixed +- Prefix assets files with `requests_pathname_prefix`. [#351](https://github.com/plotly/dash/pull/351) + +## Added +- `Dash.get_asset_url` will give the prefixed url for the asset file. + ## 0.26.2 - 2018-08-26 ## Fixed - Only create the assets blueprint once for app that provide the same flask instance to multiple dash instance. [#343](https://github.com/plotly/dash/pull/343) diff --git a/dash/_utils.py b/dash/_utils.py index bc71fde9..9b1a48fa 100644 --- a/dash/_utils.py +++ b/dash/_utils.py @@ -20,6 +20,18 @@ def format_tag(tag_name, attributes, inner='', closed=False, opened=False): '{}="{}"'.format(k, v) for k, v in attributes.items()])) +def get_asset_path(requests_pathname, routes_pathname, asset_path): + i = requests_pathname.rfind(routes_pathname) + req = requests_pathname[:i] + + return '/'.join([ + # Only take the first part of the pathname + req, + 'assets', + asset_path + ]) + + class AttributeDict(dict): """ Dictionary subclass enabling attribute lookup/assignment of keys/values. diff --git a/dash/dash.py b/dash/dash.py index 4cfd0d46..81777610 100644 --- a/dash/dash.py +++ b/dash/dash.py @@ -24,6 +24,7 @@ from . import exceptions from ._utils import AttributeDict as _AttributeDict from ._utils import interpolate_str as _interpolate from ._utils import format_tag as _format_tag +from ._utils import get_asset_path as _get_asset_path from . import _configs @@ -329,9 +330,9 @@ class Dash(object): 'Serving files from absolute_path isn\'t supported yet' ) elif 'asset_path' in resource: - static_url = flask.url_for('assets.static', - filename=resource['asset_path'], - mod=resource['ts']) + static_url = self.get_asset_url(resource['asset_path']) + # Add a bust query param + static_url += '?m={}'.format(resource['ts']) srcs.append(static_url) return srcs @@ -942,6 +943,12 @@ class Dash(object): elif f == 'favicon.ico': self._favicon = path + def get_asset_url(self, path): + return _get_asset_path( + self.config.requests_pathname_prefix, + self.config.routes_pathname_prefix, + path) + def run_server(self, port=8050, debug=False, diff --git a/dash/version.py b/dash/version.py index c42375ce..be363825 100644 --- a/dash/version.py +++ b/dash/version.py @@ -1,1 +1,1 @@ -__version__ = '0.26.2' +__version__ = '0.26.3'
plotly/dash
2c1d39f8b2fa2883857aa30bbb4245e0fe2a64e0
diff --git a/tests/test_configs.py b/tests/test_configs.py index ee296733..6b65613a 100644 --- a/tests/test_configs.py +++ b/tests/test_configs.py @@ -2,6 +2,7 @@ import unittest # noinspection PyProtectedMember from dash import _configs from dash import exceptions as _exc +from dash._utils import get_asset_path import os @@ -88,6 +89,21 @@ class MyTestCase(unittest.TestCase): _, routes, req = _configs.pathname_configs() self.assertEqual('/requests/', req) + def test_pathname_prefix_assets(self): + req = '/' + routes = '/' + path = get_asset_path(req, routes, 'reset.css') + self.assertEqual('/assets/reset.css', path) + + req = '/requests/' + path = get_asset_path(req, routes, 'reset.css') + self.assertEqual('/requests/assets/reset.css', path) + + req = '/requests/routes/' + routes = '/routes/' + path = get_asset_path(req, routes, 'reset.css') + self.assertEqual('/requests/assets/reset.css', path) + if __name__ == '__main__': unittest.main()
Assets are not prefixed with `requests_pathname_prefix`.

The asset files should be prefixed with `requests_pathname_prefix`, like other resources.
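A short sketch of the fix from the patch above, using the new `get_asset_path` helper (the three cases mirror test_pathname_prefix_assets):

```python
from dash._utils import get_asset_path

# The requests prefix, minus its trailing routes prefix, is joined
# with 'assets' and the file name.
assert get_asset_path('/', '/', 'reset.css') == '/assets/reset.css'
assert get_asset_path('/requests/', '/', 'reset.css') \
    == '/requests/assets/reset.css'
assert get_asset_path('/requests/routes/', '/routes/', 'reset.css') \
    == '/requests/assets/reset.css'
```

In app code, `app.get_asset_url('reset.css')` applies the same join using the app's configured prefixes.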
0.0
2c1d39f8b2fa2883857aa30bbb4245e0fe2a64e0
[ "tests/test_configs.py::MyTestCase::test_invalid_pathname_prefix", "tests/test_configs.py::MyTestCase::test_pathname_prefix_assets", "tests/test_configs.py::MyTestCase::test_pathname_prefix_environ_requests", "tests/test_configs.py::MyTestCase::test_pathname_prefix_environ_routes", "tests/test_configs.py::MyTestCase::test_pathname_prefix_from_environ_app_name", "tests/test_configs.py::MyTestCase::test_valid_pathname_prefix_init" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-08-24 20:41:15+00:00
mit
4612
plotly__dash-726
diff --git a/dash/development/_py_components_generation.py b/dash/development/_py_components_generation.py index 66db6a12..1b5e2d52 100644 --- a/dash/development/_py_components_generation.py +++ b/dash/development/_py_components_generation.py @@ -488,11 +488,12 @@ def map_js_to_py_types_prop_types(type_object): if js_to_py_type(subType) != '')), # React's PropTypes.arrayOf - arrayOf=lambda: 'list'.format( # pylint: disable=too-many-format-args - ' of {}s'.format( - js_to_py_type(type_object['value'])) - if js_to_py_type(type_object['value']) != '' - else ''), + arrayOf=lambda: ( + "list" + ((" of {}s").format( + js_to_py_type(type_object["value"])) + if js_to_py_type(type_object["value"]) != "" + else "") + ), # React's PropTypes.objectOf objectOf=lambda: ( diff --git a/dash/development/_r_components_generation.py b/dash/development/_r_components_generation.py index 616f1b66..bbb8315f 100644 --- a/dash/development/_r_components_generation.py +++ b/dash/development/_r_components_generation.py @@ -6,6 +6,7 @@ import sys import shutil import glob import importlib +import textwrap import re from ._all_keywords import r_keywords @@ -41,15 +42,15 @@ deps_metadata <- list(""" frame_element_template = """`{dep_name}` = structure(list(name = "{dep_name}", version = "{project_ver}", src = list(href = NULL, file = "deps"), meta = NULL, -script = "{dep_rpp}", -stylesheet = NULL, head = NULL, attachment = NULL, package = "{rpkgname}", -all_files = FALSE), class = "html_dependency")""" +script = {script_name}, +stylesheet = {css_name}, head = NULL, attachment = NULL, package = "{rpkgname}", +all_files = FALSE), class = "html_dependency")""" # noqa:E501 frame_body_template = """`{project_shortname}` = structure(list(name = "{project_shortname}", version = "{project_ver}", src = list(href = NULL, file = "deps"), meta = NULL, -script = "{dep_rpp}", -stylesheet = NULL, head = NULL, attachment = NULL, package = "{rpkgname}", +script = {script_name}, +stylesheet = {css_name}, head = NULL, attachment = NULL, package = "{rpkgname}", all_files = FALSE), class = "html_dependency")""" # noqa:E501 frame_close_template = """) @@ -70,7 +71,7 @@ help_string = """% Auto-generated: do not edit by hand }} \\usage{{ -{prefix}{name}({default_argtext}) +{argtext} }} \\arguments{{ @@ -237,7 +238,8 @@ def generate_js_metadata(pkg_data, project_shortname): # import component library module into sys mod = sys.modules[project_shortname] - jsdist = getattr(mod, "_js_dist", []) + alldist = getattr(mod, "_js_dist", []) + getattr(mod, "_css_dist", []) + project_ver = pkg_data.get("version") rpkgname = snake_case_to_camel_case(project_shortname) @@ -253,29 +255,45 @@ def generate_js_metadata(pkg_data, project_shortname): function_frame_body = [] # pylint: disable=consider-using-enumerate - if len(jsdist) > 1: - for dep in range(len(jsdist)): - if "dash_" in jsdist[dep]["relative_package_path"]: - dep_name = jsdist[dep]["relative_package_path"].split(".")[0] + if len(alldist) > 1: + for dep in range(len(alldist)): + rpp = alldist[dep]["relative_package_path"] + if "dash_" in rpp: + dep_name = rpp.split(".")[0] else: dep_name = "{}_{}".format(project_shortname, str(dep)) project_ver = str(dep) + if "css" in rpp: + css_name = "'{}'".format(rpp) + script_name = 'NULL' + else: + script_name = "'{}'".format(rpp) + css_name = 'NULL' function_frame += [ frame_element_template.format( dep_name=dep_name, project_ver=project_ver, rpkgname=rpkgname, project_shortname=project_shortname, - dep_rpp=jsdist[dep]["relative_package_path"], + 
script_name=script_name, + css_name=css_name, ) ] function_frame_body = ",\n".join(function_frame) - elif len(jsdist) == 1: + elif len(alldist) == 1: + rpp = alldist[0]["relative_package_path"] + if "css" in rpp: + css_name = rpp + script_name = "NULL" + else: + script_name = rpp + css_name = "NULL" function_frame_body = frame_body_template.format( project_shortname=project_shortname, project_ver=project_ver, rpkgname=rpkgname, - dep_rpp=jsdist[0]["relative_package_path"], + script_name=script_name, + css_name=css_name, ) function_string = "".join( @@ -318,42 +336,45 @@ def write_help_file(name, props, description, prefix): default_argtext += ", ".join("{}=NULL".format(p) for p in prop_keys) item_text += "\n\n".join( - "\\item{{{}}}{{{}}}".format(p, props[p]["description"]) + "\\item{{{}}}{{{}{}}}".format(p, + print_r_type( + props[p]["type"] + ), + props[p]["description"]) for p in prop_keys ) if has_wildcards: - item_text += "\n\n\\item{...}{wildcards: `data-*` or `aria-*`}" - default_argtext += ", ..." - - file_path = os.path.join("man", file_name) - with open(file_path, "w") as f: - f.write( - help_string.format( - prefix=prefix, - name=name, - default_argtext=default_argtext, - item_text=item_text, - description=description.replace("\n", " "), - ) - ) + item_text += '\n\n\\item{...}{wildcards: `data-*` or `aria-*`}' + default_argtext += ', ...' + + # in R, the online help viewer does not properly wrap lines for + # the usage string -- we will hard wrap at 80 characters using + # textwrap.fill, starting from the beginning of the usage string + argtext = prefix + name + "({})".format(default_argtext) + + file_path = os.path.join('man', file_name) + with open(file_path, 'w') as f: + f.write(help_string.format( + prefix=prefix, + name=name, + argtext=textwrap.fill(argtext, + width=80, + break_long_words=False), + item_text=item_text, + description=description.replace('\n', ' ') + )) def write_class_file(name, props, description, project_shortname, prefix=None): props = reorder_props(props=props) - # generate the R help pages for each of the Dash components that we - # are transpiling -- this is done to avoid using Roxygen2 syntax, - # we may eventually be able to generate similar documentation using - # doxygen and an R plugin, but for now we'll just do it on our own - # from within Python - write_help_file(name, props, description, prefix) - import_string = "# AUTO GENERATED FILE - DO NOT EDIT\n\n" class_string = generate_class_string(name, props, project_shortname, prefix) + file_name = "{}{}.R".format(prefix, name) file_path = os.path.join("R", file_name) @@ -361,6 +382,18 @@ def write_class_file(name, props, description, project_shortname, prefix=None): f.write(import_string) f.write(class_string) + # generate the R help pages for each of the Dash components that we + # are transpiling -- this is done to avoid using Roxygen2 syntax, + # we may eventually be able to generate similar documentation using + # doxygen and an R plugin, but for now we'll just do it on our own + # from within Python + write_help_file( + name, + props, + description, + prefix + ) + print("Generated {}".format(file_name)) @@ -625,3 +658,154 @@ def generate_exports( package_imports, package_suggests, ) + + +def get_r_prop_types(type_object): + """Mapping from the PropTypes js type object to the R type""" + + def shape_or_exact(): + return 'lists containing elements {}.\n{}'.format( + ', '.join( + "'{}'".format(t) for t in list(type_object['value'].keys()) + ), + 'Those elements have the following 
types:\n{}'.format( + '\n'.join( + create_prop_docstring_r( + prop_name=prop_name, + type_object=prop, + required=prop['required'], + description=prop.get('description', ''), + indent_num=1 + ) for prop_name, prop in + list(type_object['value'].items()))) + ) + + return dict( + array=lambda: "unnamed list", + bool=lambda: "logical", + number=lambda: "numeric", + string=lambda: "character", + object=lambda: "named list", + any=lambda: "logical | numeric | character | " + "named list | unnamed list", + element=lambda: "dash component", + node=lambda: "a list of or a singular dash " + "component, string or number", + # React's PropTypes.oneOf + enum=lambda: "a value equal to: {}".format( + ", ".join("{}".format(str(t["value"])) + for t in type_object["value"]) + ), + # React's PropTypes.oneOfType + union=lambda: "{}".format( + " | ".join( + "{}".format(get_r_type(subType)) + for subType in type_object["value"] + if get_r_type(subType) != "" + ) + ), + # React's PropTypes.arrayOf + arrayOf=lambda: ( + "list" + ((" of {}s").format( + get_r_type(type_object["value"])) + if get_r_type(type_object["value"]) != "" + else "") + ), + # React's PropTypes.objectOf + objectOf=lambda: ( + "list with named elements and values of type {}" + ).format( + get_r_type(type_object["value"]) + ), + + # React's PropTypes.shape + shape=shape_or_exact, + # React's PropTypes.exact + exact=shape_or_exact + ) + + +def get_r_type(type_object, is_flow_type=False, indent_num=0): + """ + Convert JS types to R types for the component definition + Parameters + ---------- + type_object: dict + react-docgen-generated prop type dictionary + + indent_num: int + Number of indents to use for the docstring for the prop + Returns + ------- + str + Python type string + """ + js_type_name = type_object["name"] + js_to_r_types = get_r_prop_types(type_object=type_object) + if ( + "computed" in type_object + and type_object["computed"] + or type_object.get("type", "") == "function" + ): + return "" + elif js_type_name in js_to_r_types: + prop_type = js_to_r_types[js_type_name]() + return prop_type + return "" + + +def print_r_type(typedata): + typestring = get_r_type(typedata).capitalize() + if typestring: + typestring += ". " + return typestring + + +# pylint: disable=too-many-arguments +def create_prop_docstring_r(prop_name, type_object, required, description, + indent_num, is_flow_type=False): + """ + Create the Dash component prop docstring + Parameters + ---------- + prop_name: str + Name of the Dash component prop + type_object: dict + react-docgen-generated prop type dictionary + required: bool + Component is required? + description: str + Dash component description + indent_num: int + Number of indents to use for the context block + (creates 2 spaces for every indent) + is_flow_type: bool + Does the prop use Flow types? Otherwise, uses PropTypes + Returns + ------- + str + Dash component prop docstring + """ + r_type_name = get_r_type( + type_object=type_object, + is_flow_type=is_flow_type, + indent_num=indent_num + 1) + + indent_spacing = ' ' * indent_num + if '\n' in r_type_name: + return '{indent_spacing}- {name} ({is_required}): {description}. 
' \ + '{name} has the following type: {type}'.format( + indent_spacing=indent_spacing, + name=prop_name, + type=r_type_name, + description=description, + is_required='required' if required else 'optional') + return '{indent_spacing}- {name} ({type}' \ + '{is_required}){description}'.format( + indent_spacing=indent_spacing, + name=prop_name, + type='{}; '.format(r_type_name) if r_type_name else '', + description=( + ': {}'.format(description) if description != '' else '' + ), + is_required='required' if required else 'optional')
plotly/dash
c2659ae09d6e271f2ca3552df998ede9125cc3cb
diff --git a/tests/unit/dash/development/metadata_test.py b/tests/unit/dash/development/metadata_test.py index 785feae6..edb67126 100644 --- a/tests/unit/dash/development/metadata_test.py +++ b/tests/unit/dash/development/metadata_test.py @@ -19,7 +19,7 @@ Keyword arguments: - optionalElement (dash component; optional) - optionalEnum (a value equal to: 'News', 'Photos'; optional) - optionalUnion (string | number; optional) -- optionalArrayOf (list; optional) +- optionalArrayOf (list of numbers; optional) - optionalObjectOf (dict with strings as keys and values of type number; optional) - optionalObjectWithExactAndNestedDescription (optional): . optionalObjectWithExactAndNestedDescription has the following type: dict containing keys 'color', 'fontSize', 'figure'. Those keys have the following types: @@ -27,7 +27,7 @@ Those keys have the following types: - fontSize (number; optional) - figure (optional): Figure is a plotly graph object. figure has the following type: dict containing keys 'data', 'layout'. Those keys have the following types: - - data (list; optional): data is a collection of traces + - data (list of dicts; optional): data is a collection of traces - layout (dict; optional): layout describes the rest of the figure - optionalObjectWithShapeAndNestedDescription (optional): . optionalObjectWithShapeAndNestedDescription has the following type: dict containing keys 'color', 'fontSize', 'figure'. Those keys have the following types: @@ -35,7 +35,7 @@ Those keys have the following types: - fontSize (number; optional) - figure (optional): Figure is a plotly graph object. figure has the following type: dict containing keys 'data', 'layout'. Those keys have the following types: - - data (list; optional): data is a collection of traces + - data (list of dicts; optional): data is a collection of traces - layout (dict; optional): layout describes the rest of the figure - optionalAny (boolean | number | string | dict | list; optional) - customProp (optional) diff --git a/tests/unit/dash/development/test_base_component.py b/tests/unit/dash/development/test_base_component.py index 739590cc..9040a446 100644 --- a/tests/unit/dash/development/test_base_component.py +++ b/tests/unit/dash/development/test_base_component.py @@ -540,15 +540,19 @@ class TestGenerateClassFile(unittest.TestCase): for line in s.split('\n'): self.assertEqual(line, line.rstrip()) + def match_lines(self, val, expected): + for val1, exp1 in zip(val.splitlines(), expected.splitlines()): + assert val1 == exp1 + def test_class_string(self): - self.assertEqual( + self.match_lines( self.expected_class_string, self.component_class_string ) self.assert_no_trailing_spaces(self.component_class_string) def test_class_file(self): - self.assertEqual( + self.match_lines( self.expected_class_string, self.written_class_string ) @@ -801,7 +805,7 @@ class TestMetaDataConversions(unittest.TestCase): ['optionalUnion', 'string | number'], - ['optionalArrayOf', 'list'], + ['optionalArrayOf', 'list of numbers'], ['optionalObjectOf', 'dict with strings as keys and values of type number'], @@ -814,7 +818,7 @@ class TestMetaDataConversions(unittest.TestCase): " - fontSize (number; optional)", " - figure (optional): Figure is a plotly graph object. 
figure has the following type: dict containing keys 'data', 'layout'.", # noqa: E501 "Those keys have the following types:", - " - data (list; optional): data is a collection of traces", + " - data (list of dicts; optional): data is a collection of traces", " - layout (dict; optional): layout describes the rest of the figure" # noqa: E501 ])], @@ -827,7 +831,7 @@ class TestMetaDataConversions(unittest.TestCase): " - fontSize (number; optional)", " - figure (optional): Figure is a plotly graph object. figure has the following type: dict containing keys 'data', 'layout'.", # noqa: E501 "Those keys have the following types:", - " - data (list; optional): data is a collection of traces", + " - data (list of dicts; optional): data is a collection of traces", " - layout (dict; optional): layout describes the rest of the figure" # noqa: E501 ])], @@ -888,7 +892,7 @@ def assert_docstring(assertEqual, docstring): "- optionalElement (dash component; optional)", "- optionalEnum (a value equal to: 'News', 'Photos'; optional)", "- optionalUnion (string | number; optional)", - "- optionalArrayOf (list; optional)", + "- optionalArrayOf (list of numbers; optional)", "- optionalObjectOf (dict with strings as keys and values " "of type number; optional)", @@ -907,7 +911,7 @@ def assert_docstring(assertEqual, docstring): "keys 'data', 'layout'.", "Those keys have the following types:", - " - data (list; optional): data is a collection of traces", + " - data (list of dicts; optional): data is a collection of traces", " - layout (dict; optional): layout describes " "the rest of the figure", @@ -926,7 +930,7 @@ def assert_docstring(assertEqual, docstring): "keys 'data', 'layout'.", "Those keys have the following types:", - " - data (list; optional): data is a collection of traces", + " - data (list of dicts; optional): data is a collection of traces", " - layout (dict; optional): layout describes " "the rest of the figure",
DashR transpiler: type and default information in help strings

Let's make sure that the R equivalent of docstrings has the same type/default information as the Python docstrings for components.
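A small sketch of the Python-side mapping the patch above corrects, via `js_to_py_type` from `dash.development._py_components_generation` (the prop-type dict mirrors react-docgen output):

```python
from dash.development._py_components_generation import js_to_py_type

# Before the fix, 'list'.format(...) had no placeholder and silently
# discarded the element type; arrayOf props now document it.
type_object = {'name': 'arrayOf', 'value': {'name': 'number'}}
print(js_to_py_type(type_object))  # -> 'list of numbers'
```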
0.0
c2659ae09d6e271f2ca3552df998ede9125cc3cb
[ "tests/unit/dash/development/test_base_component.py::TestGenerateClassFile::test_class_file", "tests/unit/dash/development/test_base_component.py::TestGenerateClassFile::test_class_string", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_docstring", "tests/unit/dash/development/test_base_component.py::TestMetaDataConversions::test_docgen_to_python_args", "tests/unit/dash/development/test_base_component.py::TestMetaDataConversions::test_docstring" ]
[ "tests/unit/dash/development/test_base_component.py::TestComponent::test_del_item_from_class", "tests/unit/dash/development/test_base_component.py::TestComponent::test_del_item_from_list", "tests/unit/dash/development/test_base_component.py::TestComponent::test_del_item_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/dash/development/test_base_component.py::TestComponent::test_get_item_raises_key_if_id_doesnt_exist", "tests/unit/dash/development/test_base_component.py::TestComponent::test_get_item_with_children", "tests/unit/dash/development/test_base_component.py::TestComponent::test_get_item_with_children_as_component_instead_of_list", "tests/unit/dash/development/test_base_component.py::TestComponent::test_get_item_with_nested_children_one_branch", "tests/unit/dash/development/test_base_component.py::TestComponent::test_get_item_with_nested_children_two_branches", "tests/unit/dash/development/test_base_component.py::TestComponent::test_get_item_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/dash/development/test_base_component.py::TestComponent::test_init", "tests/unit/dash/development/test_base_component.py::TestComponent::test_iter", "tests/unit/dash/development/test_base_component.py::TestComponent::test_len", "tests/unit/dash/development/test_base_component.py::TestComponent::test_len_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/dash/development/test_base_component.py::TestComponent::test_set_item", "tests/unit/dash/development/test_base_component.py::TestComponent::test_set_item_raises_key_error", "tests/unit/dash/development/test_base_component.py::TestComponent::test_set_item_with_children_as_list", "tests/unit/dash/development/test_base_component.py::TestComponent::test_set_item_with_nested_children", "tests/unit/dash/development/test_base_component.py::TestComponent::test_set_item_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/dash/development/test_base_component.py::TestComponent::test_to_plotly_json_with_children", "tests/unit/dash/development/test_base_component.py::TestComponent::test_to_plotly_json_with_nested_children", "tests/unit/dash/development/test_base_component.py::TestComponent::test_to_plotly_json_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/dash/development/test_base_component.py::TestComponent::test_to_plotly_json_with_null_arguments", "tests/unit/dash/development/test_base_component.py::TestComponent::test_to_plotly_json_with_wildcards", "tests/unit/dash/development/test_base_component.py::TestComponent::test_to_plotly_json_without_children", "tests/unit/dash/development/test_base_component.py::TestComponent::test_traverse_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/dash/development/test_base_component.py::TestComponent::test_traverse_with_tuples", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_arguments_become_attributes", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_attrs_match_forbidden_props", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_call_signature", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_no_events", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_repr_multiple_arguments", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_repr_nested_arguments", 
"tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_repr_single_default_argument", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_repr_single_non_default_argument", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_repr_with_wildcards", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_required_props", "tests/unit/dash/development/test_base_component.py::TestGenerateClass::test_to_plotly_json", "tests/unit/dash/development/test_base_component.py::TestFlowMetaDataConversions::test_docgen_to_python_args", "tests/unit/dash/development/test_base_component.py::TestFlowMetaDataConversions::test_docstring" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-05-21 06:25:17+00:00
mit
4,613
plotly__dash-796
diff --git a/dash/development/_py_components_generation.py b/dash/development/_py_components_generation.py index 3fcd96c8..61bfd9b2 100644 --- a/dash/development/_py_components_generation.py +++ b/dash/development/_py_components_generation.py @@ -436,6 +436,9 @@ def create_prop_docstring(prop_name, type_object, required, description, else: default = default['value'] + if default in ['true', 'false']: + default = default.title() + is_required = 'optional' if required: is_required = 'required'
plotly/dash
ba27febf71caf112294c53e568d4ff407c5a10ee
diff --git a/tests/unit/development/test_base_component.py b/tests/unit/development/test_base_component.py index c39fe9a7..c34c655d 100644 --- a/tests/unit/development/test_base_component.py +++ b/tests/unit/development/test_base_component.py @@ -1041,7 +1041,7 @@ def assert_flow_docstring(assertEqual, docstring): "Keyword arguments:", "- requiredString (string; required): A required string", "- optionalString (string; default ''): A string that isn't required.", - "- optionalBoolean (boolean; default false): A boolean test", + "- optionalBoolean (boolean; default False): A boolean test", "- optionalNode (a list of or a singular dash component, string or number; optional): " "A node test",
[BUG] Auto-generated docstrings contain JS boolean values instead of Python boolean values Prompted by https://github.com/plotly/dash-bio/pull/379#discussion_r297840872 While `true` and `false` are not capitalized in JavaScript, they are capitalized in Python. The Python components' docstrings should reflect this, since other JS types are "translated" into Python types (e.g., `PropTypes.shape` -> `dict`).
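The fix in the patch above reduces to one normalization step on JS default values. A minimal sketch of that idea, with a hypothetical `normalize_default` helper (the real change is inlined in `create_prop_docstring`):

```python
# Hedged sketch: title-case JS boolean literals so generated Python
# docstrings show True/False. normalize_default is a hypothetical
# name; the actual patch performs this inline.
def normalize_default(default: str) -> str:
    if default in ("true", "false"):
        return default.title()  # "true" -> "True", "false" -> "False"
    return default

assert normalize_default("false") == "False"
assert normalize_default("''") == "''"  # non-boolean defaults pass through
```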
0.0
ba27febf71caf112294c53e568d4ff407c5a10ee
[ "tests/unit/development/test_base_component.py::TestFlowMetaDataConversions::test_docstring" ]
[ "tests/unit/development/test_base_component.py::TestComponent::test_del_item_from_class", "tests/unit/development/test_base_component.py::TestComponent::test_del_item_from_list", "tests/unit/development/test_base_component.py::TestComponent::test_del_item_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/development/test_base_component.py::TestComponent::test_get_item_raises_key_if_id_doesnt_exist", "tests/unit/development/test_base_component.py::TestComponent::test_get_item_with_children", "tests/unit/development/test_base_component.py::TestComponent::test_get_item_with_children_as_component_instead_of_list", "tests/unit/development/test_base_component.py::TestComponent::test_get_item_with_nested_children_one_branch", "tests/unit/development/test_base_component.py::TestComponent::test_get_item_with_nested_children_two_branches", "tests/unit/development/test_base_component.py::TestComponent::test_get_item_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/development/test_base_component.py::TestComponent::test_init", "tests/unit/development/test_base_component.py::TestComponent::test_iter", "tests/unit/development/test_base_component.py::TestComponent::test_len", "tests/unit/development/test_base_component.py::TestComponent::test_len_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/development/test_base_component.py::TestComponent::test_set_item", "tests/unit/development/test_base_component.py::TestComponent::test_set_item_raises_key_error", "tests/unit/development/test_base_component.py::TestComponent::test_set_item_with_children_as_list", "tests/unit/development/test_base_component.py::TestComponent::test_set_item_with_nested_children", "tests/unit/development/test_base_component.py::TestComponent::test_set_item_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/development/test_base_component.py::TestComponent::test_to_plotly_json_with_children", "tests/unit/development/test_base_component.py::TestComponent::test_to_plotly_json_with_nested_children", "tests/unit/development/test_base_component.py::TestComponent::test_to_plotly_json_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/development/test_base_component.py::TestComponent::test_to_plotly_json_with_null_arguments", "tests/unit/development/test_base_component.py::TestComponent::test_to_plotly_json_with_wildcards", "tests/unit/development/test_base_component.py::TestComponent::test_to_plotly_json_without_children", "tests/unit/development/test_base_component.py::TestComponent::test_traverse_with_nested_children_with_mixed_strings_and_without_lists", "tests/unit/development/test_base_component.py::TestComponent::test_traverse_with_tuples", "tests/unit/development/test_base_component.py::TestGenerateClassFile::test_class_file", "tests/unit/development/test_base_component.py::TestGenerateClassFile::test_class_string", "tests/unit/development/test_base_component.py::TestGenerateClass::test_arguments_become_attributes", "tests/unit/development/test_base_component.py::TestGenerateClass::test_attrs_match_forbidden_props", "tests/unit/development/test_base_component.py::TestGenerateClass::test_call_signature", "tests/unit/development/test_base_component.py::TestGenerateClass::test_docstring", "tests/unit/development/test_base_component.py::TestGenerateClass::test_no_events", "tests/unit/development/test_base_component.py::TestGenerateClass::test_repr_multiple_arguments", 
"tests/unit/development/test_base_component.py::TestGenerateClass::test_repr_nested_arguments", "tests/unit/development/test_base_component.py::TestGenerateClass::test_repr_single_default_argument", "tests/unit/development/test_base_component.py::TestGenerateClass::test_repr_single_non_default_argument", "tests/unit/development/test_base_component.py::TestGenerateClass::test_repr_with_wildcards", "tests/unit/development/test_base_component.py::TestGenerateClass::test_required_props", "tests/unit/development/test_base_component.py::TestGenerateClass::test_to_plotly_json", "tests/unit/development/test_base_component.py::TestMetaDataConversions::test_docgen_to_python_args", "tests/unit/development/test_base_component.py::TestMetaDataConversions::test_docstring", "tests/unit/development/test_base_component.py::TestFlowMetaDataConversions::test_docgen_to_python_args" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2019-06-26 20:45:34+00:00
mit
4,614
plotly__dash-bio-589
diff --git a/CHANGELOG.md b/CHANGELOG.md index 1229b672..9e5c47f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog +## Unreleased - ??? + +### Changed +* [#589](https://github.com/plotly/dash-bio/pull/589) Removed hardcoded clustergram linkage method, added parameter `link_method` instead. + ## [0.8.0] - 2021-09-27 ### Fixed diff --git a/dash_bio/component_factory/_clustergram.py b/dash_bio/component_factory/_clustergram.py index 8ac4258f..33a143b4 100644 --- a/dash_bio/component_factory/_clustergram.py +++ b/dash_bio/component_factory/_clustergram.py @@ -26,7 +26,8 @@ def Clustergram( row_dist="euclidean", col_dist="euclidean", dist_fun=scs.distance.pdist, - link_fun=lambda x, **kwargs: sch.linkage(x, "complete", **kwargs), + link_fun=None, + link_method=None, color_threshold=None, optimal_leaf_order=False, color_map=None, @@ -87,6 +88,11 @@ Keyword arguments: - link_fun (function; default scipy.cluster.hierarchy.linkage): Function to compute the linkage matrix from the pairwise distances (see docs for scipy.cluster.hierarchy.linkage). +- link_method (string; default 'complete'): The linkage algorithm to use + if link_fun not set. For method 'single', an optimized algorithm based + on minimum spanning, for methods 'complete', 'average', 'weighted' and + 'ward', an algorithm called nearest-neighbors chain is implemented + (see docs for scipy.cluster.hierarchy.linkage). - color_threshold (dict; default {'row': 0, 'col': 0}): Maximum linkage value for which unique colors are assigned to clusters; 'row' for rows, and 'col' for columns. @@ -162,6 +168,7 @@ Keyword arguments: - width (number; default 500): The width of the graph, in px. """ + if color_threshold is None: color_threshold = dict(row=0, col=0) @@ -209,7 +216,8 @@ Methods: row_dist="euclidean", col_dist="euclidean", dist_fun=scs.distance.pdist, - link_fun=lambda x, **kwargs: sch.linkage(x, "complete", **kwargs), + link_fun=None, + link_method=None, color_threshold=None, optimal_leaf_order=False, color_map=None, @@ -246,6 +254,15 @@ Methods: # Always keep unique identifiers for columns column_ids = list(range(data.shape[1])) + if link_method is None: + link_method = "complete" + + if link_fun is None: + def linkage(x, **kwargs): + return sch.linkage(x, link_method, **kwargs) + + link_fun = linkage + self._data = data self._row_labels = row_labels self._row_ids = row_ids
plotly/dash-bio
243c371559b15db8bbb7304625be292e989b8207
diff --git a/tests/unit/test_clustergram.py b/tests/unit/test_clustergram.py index 4bb130f2..b03643cc 100644 --- a/tests/unit/test_clustergram.py +++ b/tests/unit/test_clustergram.py @@ -83,3 +83,18 @@ def test_column_labels(): clustered_data = CLUSTERED_DATA.T assert np.array_equal(curves_dict['heatmap']['z'], clustered_data) + + +def test_link_method(): + """Test that specifying linkage method.""" + + data = DATA + _, _, curves_dict = Clustergram( + data, + generate_curves_dict=True, + return_computed_traces=True, + center_values=False, + link_method='centroid' + ) + clustered_data = CLUSTERED_DATA + assert not np.array_equal(curves_dict['heatmap']['z'], clustered_data)
Clustergram: Why is complete linkage hard-coded? https://github.com/plotly/dash-bio/blob/a471d1d51ac361e04242ae7424af20f32fb26fce/dash_bio/component_factory/_clustergram.py#L29 Shouldn't this also be a kwarg?
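The patch above answers this by deriving the default `link_fun` from a new `link_method` parameter. A minimal sketch of that closure, assuming scipy is available (`make_link_fun` is a hypothetical helper name; the real code builds the closure inside `Clustergram`):

```python
# Hedged sketch of replacing the hardcoded "complete" linkage with a
# configurable link_method, mirroring the patch above.
import scipy.cluster.hierarchy as sch

def make_link_fun(link_method=None):
    method = link_method or "complete"  # the previously hardcoded default
    def link_fun(x, **kwargs):
        return sch.linkage(x, method, **kwargs)
    return link_fun

# e.g. the new test passes link_method='centroid':
centroid_link_fun = make_link_fun("centroid")
```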
0.0
243c371559b15db8bbb7304625be292e989b8207
[ "tests/unit/test_clustergram.py::test_link_method" ]
[ "tests/unit/test_clustergram.py::test_cluster_rows", "tests/unit/test_clustergram.py::test_read_dataframe", "tests/unit/test_clustergram.py::test_row_labels", "tests/unit/test_clustergram.py::test_column_labels" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-10-30 07:46:58+00:00
mit
4,615
pndurette__gTTS-157
diff --git a/MANIFEST.in b/MANIFEST.in index 82ac039..6527eaa 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ -include README.rst +include README.md include CHANGELOG.rst include CONTRIBUTING.rst -include LICENSE +include LICENSE \ No newline at end of file diff --git a/gtts/tokenizer/symbols.py b/gtts/tokenizer/symbols.py index 253aae7..1670360 100644 --- a/gtts/tokenizer/symbols.py +++ b/gtts/tokenizer/symbols.py @@ -14,3 +14,5 @@ ALL_PUNC = u"?!?!.,¡()[]¿…‥،;:—。,、:\n" TONE_MARKS = u"?!?!" PERIOD_COMMA = u".," + +COLON = u":" diff --git a/gtts/tokenizer/tokenizer_cases.py b/gtts/tokenizer/tokenizer_cases.py index 1d4f6ba..e48cae0 100644 --- a/gtts/tokenizer/tokenizer_cases.py +++ b/gtts/tokenizer/tokenizer_cases.py @@ -31,6 +31,18 @@ def period_comma(): pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex +def colon(): + """Colon case. + + Match a colon ":" only if not preceeded by a digit. + Mainly to prevent a cut in the middle of time notations e.g. 10:01 + + """ + return RegexBuilder( + pattern_args=symbols.COLON, + pattern_func=lambda x: r"(?<!\d){}".format(x)).regex + + def other_punctuation(): """Match other punctuation. @@ -38,10 +50,11 @@ def other_punctuation(): inserts a break in speech. """ - punc = ''.join(( + punc = ''.join( set(symbols.ALL_PUNC) - set(symbols.TONE_MARKS) - - set(symbols.PERIOD_COMMA))) + set(symbols.PERIOD_COMMA) - + set(symbols.COLON)) return RegexBuilder( pattern_args=punc, pattern_func=lambda x: u"{}".format(x)).regex diff --git a/gtts/tts.py b/gtts/tts.py index db20ea0..bc7ddfe 100644 --- a/gtts/tts.py +++ b/gtts/tts.py @@ -57,6 +57,7 @@ class gTTS: Tokenizer([ tokenizer_cases.tone_marks, tokenizer_cases.period_comma, + tokenizer_cases.colon, tokenizer_cases.other_punctuation ]).run @@ -97,6 +98,7 @@ class gTTS: tokenizer_func=Tokenizer([ tokenizer_cases.tone_marks, tokenizer_cases.period_comma, + tokenizer_cases.colon, tokenizer_cases.other_punctuation ]).run ): diff --git a/news/135.feature b/news/135.feature new file mode 100644 index 0000000..f692ce4 --- /dev/null +++ b/news/135.feature @@ -0,0 +1,1 @@ +Added new tokenizer case for ':' preventing cut in the middle of a time notation diff --git a/news/159.misc b/news/159.misc new file mode 100644 index 0000000..15342b0 --- /dev/null +++ b/news/159.misc @@ -0,0 +1,1 @@ +Updated MANIFEST.in to contain new README.md
pndurette/gTTS
ced9986a6b309778efd307ea75b6aff9c0ff4677
diff --git a/gtts/tokenizer/tests/test_tokenizer_cases.py b/gtts/tokenizer/tests/test_tokenizer_cases.py index 771a297..13e63f2 100644 --- a/gtts/tokenizer/tests/test_tokenizer_cases.py +++ b/gtts/tokenizer/tests/test_tokenizer_cases.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import unittest -from gtts.tokenizer.tokenizer_cases import tone_marks, period_comma, other_punctuation, legacy_all_punctuation +from gtts.tokenizer.tokenizer_cases import tone_marks, period_comma, colon, other_punctuation, legacy_all_punctuation from gtts.tokenizer import Tokenizer, symbols @@ -17,12 +17,19 @@ class TestPreTokenizerCases(unittest.TestCase): _out = ['Hello', "it's 24.5 degrees in the U.K. today", '$20,000,000.'] self.assertEqual(t.run(_in), _out) + def test_colon(self): + t = Tokenizer([colon]) + _in = "It's now 6:30 which means: morning missing:space" + _out = ["It's now 6:30 which means", ' morning missing', 'space'] + self.assertEqual(t.run(_in), _out) + def test_other_punctuation(self): # String of the unique 'other punctuations' other_punc_str = ''.join( set(symbols.ALL_PUNC) - set(symbols.TONE_MARKS) - - set(symbols.PERIOD_COMMA)) + set(symbols.PERIOD_COMMA) - + set(symbols.COLON)) t = Tokenizer([other_punctuation]) self.assertEqual(len(t.run(other_punc_str)) - 1, len(other_punc_str))
Adjust Tokenizer
Hello, very often gTTS speech synthesis does not read the current time correctly, because the tokenizer splits it. Example:

> ... the current time is 08:30 o'clock.

The text is split on ":". Is there a possibility to prevent a split on ":" in general? Thanks!
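The patch above prevents this with a negative-lookbehind regex: a colon only splits the text when it is not preceded by a digit. A minimal sketch of that tokenizer case:

```python
# Split on ":" only when not preceded by a digit, so time notations
# such as "6:30" stay intact (same regex idea as the patch above).
import re

colon = re.compile(r"(?<!\d):")
print(colon.split("It's now 6:30 which means: morning"))
# -> ["It's now 6:30 which means", ' morning']
```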
0.0
ced9986a6b309778efd307ea75b6aff9c0ff4677
[ "gtts/tokenizer/tests/test_tokenizer_cases.py::TestPreTokenizerCases::test_colon", "gtts/tokenizer/tests/test_tokenizer_cases.py::TestPreTokenizerCases::test_legacy_all_punctuation", "gtts/tokenizer/tests/test_tokenizer_cases.py::TestPreTokenizerCases::test_other_punctuation", "gtts/tokenizer/tests/test_tokenizer_cases.py::TestPreTokenizerCases::test_period_comma", "gtts/tokenizer/tests/test_tokenizer_cases.py::TestPreTokenizerCases::test_tone_marks" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2018-12-09 08:14:42+00:00
mit
4,616
pndurette__gTTS-245
diff --git a/README.md b/README.md index 09433aa..822c289 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,6 @@ Write spoken `mp3` data to a file, a file-like object (bytestring) for further a - Customizable speech-specific sentence tokenizer that allows for unlimited lengths of text to be read, all while keeping proper intonation, abbreviations, decimals and more; - Customizable text pre-processors which can, for example, provide pronunciation corrections; -- Automatic retrieval of supported languages. ### Installation diff --git a/docs/cli.rst b/docs/cli.rst index ea5255a..39745a2 100644 --- a/docs/cli.rst +++ b/docs/cli.rst @@ -16,10 +16,6 @@ List available languages:: $ gtts-cli --all -List available languages (Italian names):: - - $ gtts-cli --tld it --all - Read 'hello' to ``hello.mp3``:: $ gtts-cli 'hello' --output hello.mp3 diff --git a/gtts/cli.py b/gtts/cli.py index 6d90ac6..f7c3f82 100644 --- a/gtts/cli.py +++ b/gtts/cli.py @@ -60,14 +60,12 @@ def validate_text(ctx, param, text): def validate_lang(ctx, param, lang): """Validation callback for the <lang> option. Ensures <lang> is a supported language unless the <nocheck> flag is set - Uses <tld> to fetch languages from other domains """ if ctx.params['nocheck']: return lang try: - tld = ctx.params['tld'] - if lang not in tts_langs(tld): + if lang not in tts_langs(): raise click.UsageError( "'%s' not in list of supported languages.\n" "Use --all to list languages or " @@ -92,14 +90,7 @@ def print_languages(ctx, param, value): return try: - tld = ctx.params['tld'] - except KeyError: - # Either --tld was used after --all or not at all - # Default to the 'com' tld - tld = 'com' - - try: - langs = tts_langs(tld) + langs = tts_langs() langs_str_list = sorted("{}: {}".format(k, langs[k]) for k in langs) click.echo(' ' + '\n '.join(langs_str_list)) except RuntimeError as e: # pragma: no cover @@ -167,8 +158,7 @@ def set_debug(ctx, param, debug): is_eager=True, expose_value=False, callback=print_languages, - help="Print all documented available IETF language tags and exit. " - "Use --tld beforehand to use an alternate domain") + help="Print all documented available IETF language tags and exit.") @click.option( '--debug', default=False, diff --git a/gtts/lang.py b/gtts/lang.py index 4cb8a40..089c84c 100644 --- a/gtts/lang.py +++ b/gtts/lang.py @@ -1,9 +1,5 @@ # -*- coding: utf-8 -*- -from gtts.utils import _translate_url -from bs4 import BeautifulSoup -import requests import logging -import re __all__ = ['tts_langs'] @@ -12,14 +8,9 @@ log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -def tts_langs(tld="com"): +def tts_langs(): """Languages Google Text-to-Speech supports. - Args: - tld (string): Top-level domain for the Google Translate host - to fetch languages from. i.e `https://translate.google.<tld>`. - Default is ``com``. - Returns: dict: A dictionary of the type `{ '<lang>': '<name>'}` @@ -29,60 +20,86 @@ def tts_langs(tld="com"): The dictionary returned combines languages from two origins: - - Languages fetched automatically from Google Translate + - Languages fetched from Google Translate - Languages that are undocumented variations that were observed to work and present different dialects or accents. 
""" - try: - langs = dict() - log.debug("Fetching with '{}' tld".format(tld)) - langs.update(_fetch_langs(tld)) - langs.update(_extra_langs()) - log.debug("langs: {}".format(langs)) - return langs - except Exception as e: - raise RuntimeError("Unable to get language list: {}".format(str(e))) - - -def _fetch_langs(tld="com"): - """Fetch (scrape) languages from Google Translate. - - Google Translate loads a JavaScript Array of 'languages codes' that can - be spoken. We intersect this list with all the languages Google Translate - provides to get the ones that support text-to-speech. - - Args: - tld (string): Top-level domain for the Google Translate host - to fetch languages from. i.e `https://translate.google.<tld>`. - The language names obtained will be in a language locale of the TLD - (e.g. ``tld=fr`` will retrieve the French names of the languages). - Default is ``com``. - - Returns: - dict: A dictionnary of languages from Google Translate + langs = dict() + langs.update(_main_langs()) + langs.update(_extra_langs()) + log.debug("langs: {}".format(langs)) + return langs - """ - # Load JavaScript - js_contents = requests.get('https://ssl.gstatic.com/inputtools/js/ln/17/en.js').text +def _main_langs(): + """Define the main languages. - # Approximately extract TTS-enabled language codes - # RegEx pattern search because minified variables can change. - # Extra garbage will be dealt with later as we keep languages only. - # In: "[...]Fv={af:1,ar:1,[...],zh:1,"zh-cn":1,"zh-tw":1}[...]" - # Out: ['is', '12', [...], 'af', 'ar', [...], 'zh', 'zh-cn', 'zh-tw'] - pattern = r'[{,\"](\w{2}|\w{2}-\w{2,3})(?=:1|\":1)' - tts_langs = re.findall(pattern, js_contents) + Returns: + dict: A dictionnary of the main languages extracted from + Google Translate. - # Build lang. dict. from main page (JavaScript object populating lang. menu) - # Filtering with the TTS-enabled languages - # In: "{code:'auto',name:'Detect language'},{code:'af',name:'Afrikaans'},[...]" - # re.findall: [('auto', 'Detect language'), ('af', 'Afrikaans'), [...]] - # Out: {'af': 'Afrikaans', [...]} - trans_pattern = r"{code:'(?P<lang>.+?[^'])',name:'(?P<name>.+?[^'])'}" - trans_langs = re.findall(trans_pattern, page.text) - return {lang: name for lang, name in trans_langs if lang in tts_langs} + """ + return { + 'af': 'Afrikaans', + 'ar': 'Arabic', + 'bn': 'Bengali', + 'bs': 'Bosnian', + 'ca': 'Catalan', + 'cs': 'Czech', + 'cy': 'Welsh', + 'da': 'Danish', + 'de': 'German', + 'el': 'Greek', + 'en': 'English', + 'eo': 'Esperanto', + 'es': 'Spanish', + 'et': 'Estonian', + 'fi': 'Finnish', + 'fr': 'French', + 'gu': 'Gujarati', + 'hi': 'Hindi', + 'hr': 'Croatian', + 'hu': 'Hungarian', + 'hy': 'Armenian', + 'id': 'Indonesian', + 'is': 'Icelandic', + 'it': 'Italian', + 'ja': 'Japanese', + 'jw': 'Javanese', + 'km': 'Khmer', + 'kn': 'Kannada', + 'ko': 'Korean', + 'la': 'Latin', + 'lv': 'Latvian', + 'mk': 'Macedonian', + 'ml': 'Malayalam', + 'mr': 'Marathi', + 'my': 'Myanmar (Burmese)', + 'ne': 'Nepali', + 'nl': 'Dutch', + 'no': 'Norwegian', + 'pl': 'Polish', + 'pt': 'Portuguese', + 'ro': 'Romanian', + 'ru': 'Russian', + 'si': 'Sinhala', + 'sk': 'Slovak', + 'sq': 'Albanian', + 'sr': 'Serbian', + 'su': 'Sundanese', + 'sv': 'Swedish', + 'sw': 'Swahili', + 'ta': 'Tamil', + 'te': 'Telugu', + 'th': 'Thai', + 'tl': 'Filipino', + 'tr': 'Turkish', + 'uk': 'Ukrainian', + 'ur': 'Urdu', + 'vi': 'Vietnamese', + 'zh-CN': 'Chinese' + } def _extra_langs(): @@ -91,7 +108,7 @@ def _extra_langs(): Returns: dict: A dictionnary of extra languages manually defined. 
- Variations of the ones fetched by `_fetch_langs`, + Variations of the ones fetched by `_main_langs`, observed to provide different dialects or accents or just simply accepted by the Google Translate Text-to-Speech API. diff --git a/gtts/tts.py b/gtts/tts.py index c722444..bdad2a4 100644 --- a/gtts/tts.py +++ b/gtts/tts.py @@ -132,7 +132,7 @@ class gTTS: # Language if lang_check: try: - langs = tts_langs(self.tld) + langs = tts_langs() if lang.lower() not in langs: raise ValueError("Language not supported: %s" % lang) except RuntimeError as e: @@ -288,7 +288,9 @@ class gTTS: decoded = base64.b64decode(as_bytes) fp.write(decoded) else: - raise gTTSError("No audio stream in response") + # Request successful, good response, + # no audio stream in response + raise gTTSError(tts=self, response=r) log.debug("part-%i written to %s", idx, fp) except (AttributeError, TypeError) as e: raise TypeError( @@ -348,9 +350,9 @@ class gTTSError(Exception): if status == 403: cause = "Bad token or upstream API changes" - elif status == 404 and not tts.lang_check: - cause = "Unsupported language '%s'" % self.tts.lang + elif status == 200 and not tts.lang_check: + cause = "No audio stream in response. Unsupported language '%s'" % self.tts.lang elif status >= 500: cause = "Uptream API error. Try again later." - return "{}. Probable cause: {}".format(premise, cause) + return "{}. Probable cause: {}".format(premise, cause) \ No newline at end of file
pndurette/gTTS
c01a71da23482cfe08f77dcd0765cc0645eee2c8
diff --git a/gtts/tests/test_cli.py b/gtts/tests/test_cli.py index b652180..b801bda 100644 --- a/gtts/tests/test_cli.py +++ b/gtts/tests/test_cli.py @@ -77,16 +77,6 @@ def test_all(): assert re.match(r"^(?:\s{2}(\w{2}|\w{2}-\w{2}): .+\n?)+$", result.output) assert result.exit_code == 0 [email protected] -def test_all_tld(): - """Option <all> should return a list of languages""" - result = runner(['--tld', 'it', '--all']) - - # Top-level domain set to 'it', language outputs should be Italian - - assert "en: Inglese" in result.output - assert result.exit_code == 0 - # <lang> tests @pytest.mark.net @@ -108,7 +98,7 @@ def test_lang_nocheck(): assert 'lang: xx' in log assert 'lang_check: False' in log - assert "Probable cause: Unsupported language 'xx'" in result.output + assert "Unsupported language 'xx'" in result.output assert result.exit_code != 0 # Param set tests diff --git a/gtts/tests/test_lang.py b/gtts/tests/test_lang.py index 3a094ca..564c596 100644 --- a/gtts/tests/test_lang.py +++ b/gtts/tests/test_lang.py @@ -1,35 +1,24 @@ # -*- coding: utf-8 -*- import pytest -from gtts.lang import tts_langs, _fetch_langs, _extra_langs +from gtts.lang import tts_langs, _main_langs, _extra_langs """Test language list downloading""" @pytest.mark.net -def test_fetch_langs(): +def test_main_langs(): """Fetch languages successfully""" # Downloaded Languages # Safe to assume 'en' (english) will always be there - scraped_langs = _fetch_langs() + scraped_langs = _main_langs() assert 'en' in scraped_langs - # Scraping garbage - assert 'Detect language' not in scraped_langs - assert '—' not in scraped_langs - # Add-in Languages all_langs = tts_langs() extra_langs = _extra_langs() assert len(all_langs) == len(scraped_langs) + len(extra_langs) [email protected] -def test_fetch_langs_exception(): - """Raise RuntimeError on language fetch exception""" - with pytest.raises(RuntimeError): - tts_langs(tld="invalid") - - if __name__ == '__main__': pytest.main(['-x', __file__]) diff --git a/gtts/tests/test_tts.py b/gtts/tests/test_tts.py index c042b13..fa67c1b 100644 --- a/gtts/tests/test_tts.py +++ b/gtts/tests/test_tts.py @@ -5,11 +5,11 @@ from mock import Mock from six.moves import urllib from gtts.tts import gTTS, gTTSError -from gtts.lang import _fetch_langs, _extra_langs +from gtts.lang import _main_langs, _extra_langs # Testing all languages takes some time. # Set TEST_LANGS envvar to choose languages to test. 
-# * 'fetch': Languages fetched from the Web +# * 'main': Languages extracted from the Web # * 'extra': Languagee set in Languages.EXTRA_LANGS # * 'all': All of the above # * <csv>: Languages tags list to test @@ -26,10 +26,10 @@ ex.: { 'environ' : ['en', 'fr'] } """ env = os.environ.get('TEST_LANGS') if not env or env == 'all': - langs = _fetch_langs() + langs = _main_langs() langs.update(_extra_langs()) -elif env == 'fetch': - langs = _fetch_langs() +elif env == 'main': + langs = _main_langs() elif env == 'extra': langs = _extra_langs() else: @@ -50,8 +50,8 @@ def test_TTS(tmp_path, lang): tts = gTTS(text=text, lang=lang, slow=slow) tts.save(filename) - # Check if files created is > 2k - assert filename.stat().st_size > 2000 + # Check if files created is > 1.5 + assert filename.stat().st_size > 1500 @pytest.mark.net @@ -115,6 +115,7 @@ def test_get_urls(): assert r.netloc == 'translate.google.com' assert r.path == '/_/TranslateWebserverUi/data/batchexecute' + @pytest.mark.net def test_get_bodies(): """get request bodies list""" @@ -152,11 +153,11 @@ def test_infer_msg(): error403 = gTTSError(tts=tts403, response=response403) assert error403.msg == "403 (aaa) from TTS API. Probable cause: Bad token or upstream API changes" - # 404 (and not lang_check) - tts404 = Mock(lang='xx', lang_check=False) - response404 = Mock(status_code=404, reason='bbb') - error404 = gTTSError(tts=tts404, response=response404) - assert error404.msg == "404 (bbb) from TTS API. Probable cause: Unsupported language 'xx'" + # 200 (and not lang_check) + tts200 = Mock(lang='xx', lang_check=False) + response404 = Mock(status_code=200, reason='bbb') + error200 = gTTSError(tts=tts200, response=response404) + assert error200.msg == "200 (bbb) from TTS API. Probable cause: No audio stream in response. Unsupported language 'xx'" # >= 500 tts500 = Mock()
gTTS slow on generating file
Hi, I've used this library for almost 2 years. I run a Python script on an RPi3, and now when it starts generating the mp3 file from the TTS conversion it locks up for around 1 minute or more and no output file is generated. This behaviour seems to be random. Is there any issue between gTTS and the Google Translate system?
0.0
c01a71da23482cfe08f77dcd0765cc0645eee2c8
[ "gtts/tests/test_cli.py::test_text_no_text_or_file", "gtts/tests/test_cli.py::test_text_text_and_file", "gtts/tests/test_cli.py::test_text_empty", "gtts/tests/test_cli.py::test_file_not_exists", "gtts/tests/test_cli.py::test_all", "gtts/tests/test_cli.py::test_lang_not_valid", "gtts/tests/test_cli.py::test_lang_nocheck", "gtts/tests/test_cli.py::test_params_set", "gtts/tests/test_cli.py::test_stdin_text", "gtts/tests/test_cli.py::test_stdin_text_unicode", "gtts/tests/test_cli.py::test_stdin_file", "gtts/tests/test_cli.py::test_stdin_file_unicode", "gtts/tests/test_cli.py::test_text", "gtts/tests/test_cli.py::test_text_unicode", "gtts/tests/test_cli.py::test_file_ascii", "gtts/tests/test_cli.py::test_file_utf8", "gtts/tests/test_cli.py::test_stdout", "gtts/tests/test_cli.py::test_file", "gtts/tests/test_lang.py::test_main_langs", "gtts/tests/test_tts.py::test_TTS[Afrikaans]", "gtts/tests/test_tts.py::test_TTS[Arabic]", "gtts/tests/test_tts.py::test_TTS[Bengali]", "gtts/tests/test_tts.py::test_TTS[Bosnian]", "gtts/tests/test_tts.py::test_TTS[Catalan]", "gtts/tests/test_tts.py::test_TTS[Czech]", "gtts/tests/test_tts.py::test_TTS[Welsh]", "gtts/tests/test_tts.py::test_TTS[Danish]", "gtts/tests/test_tts.py::test_TTS[German]", "gtts/tests/test_tts.py::test_TTS[Greek]", "gtts/tests/test_tts.py::test_TTS[English]", "gtts/tests/test_tts.py::test_TTS[Spanish]", "gtts/tests/test_tts.py::test_TTS[Estonian]", "gtts/tests/test_tts.py::test_TTS[Finnish]", "gtts/tests/test_tts.py::test_TTS[French]", "gtts/tests/test_tts.py::test_TTS[Gujarati]", "gtts/tests/test_tts.py::test_TTS[Hindi]", "gtts/tests/test_tts.py::test_TTS[Croatian]", "gtts/tests/test_tts.py::test_TTS[Hungarian]", "gtts/tests/test_tts.py::test_TTS[Indonesian]", "gtts/tests/test_tts.py::test_TTS[Icelandic]", "gtts/tests/test_tts.py::test_TTS[Italian]", "gtts/tests/test_tts.py::test_TTS[Japanese]", "gtts/tests/test_tts.py::test_TTS[Javanese]", "gtts/tests/test_tts.py::test_TTS[Khmer]", "gtts/tests/test_tts.py::test_TTS[Kannada]", "gtts/tests/test_tts.py::test_TTS[Korean]", "gtts/tests/test_tts.py::test_TTS[Latin]", "gtts/tests/test_tts.py::test_TTS[Latvian]", "gtts/tests/test_tts.py::test_TTS[Malayalam]", "gtts/tests/test_tts.py::test_TTS[Marathi]", "gtts/tests/test_tts.py::test_TTS[Myanmar", "gtts/tests/test_tts.py::test_TTS[Nepali]", "gtts/tests/test_tts.py::test_TTS[Dutch]", "gtts/tests/test_tts.py::test_TTS[Norwegian]", "gtts/tests/test_tts.py::test_TTS[Polish]", "gtts/tests/test_tts.py::test_TTS[Portuguese]", "gtts/tests/test_tts.py::test_TTS[Romanian]", "gtts/tests/test_tts.py::test_TTS[Russian]", "gtts/tests/test_tts.py::test_TTS[Sinhala]", "gtts/tests/test_tts.py::test_TTS[Slovak]", "gtts/tests/test_tts.py::test_TTS[Albanian]", "gtts/tests/test_tts.py::test_TTS[Serbian]", "gtts/tests/test_tts.py::test_TTS[Sundanese]", "gtts/tests/test_tts.py::test_TTS[Swedish]", "gtts/tests/test_tts.py::test_TTS[Swahili]", "gtts/tests/test_tts.py::test_TTS[Tamil]", "gtts/tests/test_tts.py::test_TTS[Telugu]", "gtts/tests/test_tts.py::test_TTS[Thai]", "gtts/tests/test_tts.py::test_TTS[Filipino]", "gtts/tests/test_tts.py::test_TTS[Turkish]", "gtts/tests/test_tts.py::test_TTS[Ukrainian]", "gtts/tests/test_tts.py::test_TTS[Urdu]", "gtts/tests/test_tts.py::test_TTS[Vietnamese]", "gtts/tests/test_tts.py::test_unsupported_language_check", "gtts/tests/test_tts.py::test_empty_string", "gtts/tests/test_tts.py::test_no_text_parts", "gtts/tests/test_tts.py::test_bad_fp_type", "gtts/tests/test_tts.py::test_save", "gtts/tests/test_tts.py::test_get_urls", 
"gtts/tests/test_tts.py::test_get_bodies", "gtts/tests/test_tts.py::test_msg", "gtts/tests/test_tts.py::test_infer_msg", "gtts/tests/test_tts.py::test_WebRequest" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-11-15 01:00:52+00:00
mit
4,617
pniedzwiedzinski__pseudo-25
diff --git a/Pipfile.lock b/Pipfile.lock index 35051ac..9e007b4 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -138,11 +138,11 @@ }, "pytest-cov": { "hashes": [ - "sha256:0ab664b25c6aa9716cbf203b17ddb301932383046082c081b9848a0edf5add33", - "sha256:230ef817450ab0699c6cc3c9c8f7a829c34674456f2ed8df1fe1d39780f7c87f" + "sha256:2b097cde81a302e1047331b48cadacf23577e431b61e9c6f49a1170bbe3d3da6", + "sha256:e00ea4fdde970725482f1f35630d12f074e121a23801aabf2ae154ec6bdd343a" ], "index": "pypi", - "version": "==2.6.1" + "version": "==2.7.1" }, "pytest-timeout": { "hashes": [ @@ -397,10 +397,10 @@ }, "urllib3": { "hashes": [ - "sha256:4c291ca23bbb55c76518905869ef34bdd5f0e46af7afe6861e8375643ffee1a0", - "sha256:9a247273df709c4fedb38c711e44292304f73f39ab01beda9f6b9fc375669ac3" + "sha256:2393a695cd12afedd0dcb26fe5d50d0cf248e5a66f75dbd89a3d4eb333a61af4", + "sha256:a637e5fae88995b256e3409dc4d52c2e2e0ba32c42a6365fee8bbd2238de3cfb" ], - "version": "==1.24.2" + "version": "==1.24.3" } } } diff --git a/docs/source/index.rst b/docs/source/index.rst index 872b3bc..08b44ca 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -4,6 +4,7 @@ Home .. toctree:: getting_started + stream runtime type/index diff --git a/docs/source/stream.rst b/docs/source/stream.rst new file mode 100644 index 0000000..5037b4f --- /dev/null +++ b/docs/source/stream.rst @@ -0,0 +1,7 @@ +Runtime +======= + +.. toctree:: + +.. automodule:: pseudo.stream + :members: \ No newline at end of file diff --git a/pseudo/__init__.py b/pseudo/__init__.py index 983644f..4ce8d6a 100644 --- a/pseudo/__init__.py +++ b/pseudo/__init__.py @@ -23,7 +23,7 @@ Example: __author__ = "Patryk Niedźwiedziński" -__version__ = "0.10.0" +__version__ = "0.10.1" import gc diff --git a/pseudo/lexer.py b/pseudo/lexer.py index 17e365e..d02c317 100644 --- a/pseudo/lexer.py +++ b/pseudo/lexer.py @@ -139,6 +139,8 @@ class Lexer: return read_while(self, indent_level) if keyword == "dla": return read_for(self, indent_level) + if keyword == "koniec": + return Statement(keyword) arg = self.read_args() arg = self.read_expression(arg) if keyword == "czytaj": @@ -241,7 +243,7 @@ class Lexer: return Value(c) if c == '"' or c == "'": - return read_string() + return read_string(self) if is_operator(c): return read_operator(self.i) diff --git a/pseudo/type/__init__.py b/pseudo/type/__init__.py index 8200ac3..27a8ebe 100644 --- a/pseudo/type/__init__.py +++ b/pseudo/type/__init__.py @@ -8,7 +8,6 @@ __author__ = "Patryk Niedźwiedziński" from pseudo.type.numbers import Int from pseudo.type.string import String from pseudo.type.bool import Bool -from pseudo.type.variable import Variable, Assignment from pseudo.type.base import Value, EOL
pniedzwiedzinski/pseudo
7b63b5305f35e3dd3f98cc7ff304cb634324931d
diff --git a/tests/lexer_test.py b/tests/lexer_test.py index 8b47ee1..e33e72b 100644 --- a/tests/lexer_test.py +++ b/tests/lexer_test.py @@ -5,7 +5,8 @@ import pseudo from pseudo.type.operation import Operation, Operator from pseudo.type.loop import Loop -from pseudo.type import Int, Statement, Bool, Variable, Assignment +from pseudo.type import Int, Statement, Bool +from pseudo.type.variable import Variable, Assignment from pseudo.stream import Stream, EOL, EndOfFile __author__ = "Patryk Niedźwiedziński" @@ -220,3 +221,4 @@ def test_read_indent_size(lexer): if lexer.indent_size != 1 or lexer.indent_char != "\t": raise AssertionError + diff --git a/tests/pseudo_test.py b/tests/pseudo_test.py index 66c7ceb..249c5a7 100644 --- a/tests/pseudo_test.py +++ b/tests/pseudo_test.py @@ -23,12 +23,14 @@ jeżeli a=1 to dopóki a < 2 wykonuj T[a] := a a:=a+1 -pisz a +pisz "2" dla i:=3,...,5 wykonuj dla x:=3,...,5 wykonuj T[x] <- x pisz x + +koniec """ diff --git a/tests/type_test/statement_test.py b/tests/type_test/statement_test.py new file mode 100644 index 0000000..01ebb22 --- /dev/null +++ b/tests/type_test/statement_test.py @@ -0,0 +1,16 @@ +"""This module contains tests for pseudo.type.Statement""" + +import pytest + +from pseudo.runtime import RunTime +from pseudo.type import Statement + + [email protected](2) +def test_exit(): + try: + Statement("koniec").eval(RunTime()) + except SystemExit: + pass + else: + raise AssertionError
[BUG] read_string
**Describe the bug**
Thanks! Just a few more steps to finish reporting.

**Your pseudocode file**
```
pisz "nie"
```

**Log file**
```
File "/Users/pniedzwiedzinski/.local/share/virtualenvs/pseudo--geLLKRp/lib/python3.7/site-packages/pseudo/lexer.py", line 244, in read_next
    return read_string()
TypeError: read_string() missing 1 required positional argument: 'lexer'
```

**Desktop (please complete the following information):**
- OS: macOS
- pseudo version (check with `pdc -v`): 0.10.0

**Additional context**
How do you feel today? Great!
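The traceback above is a plain missing-argument bug, and the patch fixes it by passing the lexer through. A hedged stand-in reproduction (these are not pseudo's real `Lexer`/`read_string`, just shaped like them):

```python
# Hypothetical stand-ins reproducing the TypeError from the log above.
def read_string(lexer):
    return "string token read via %r" % lexer

class Lexer:
    def read_next_buggy(self):
        return read_string()      # TypeError: missing argument 'lexer'

    def read_next_fixed(self):
        return read_string(self)  # the one-line fix applied in the patch
```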
0.0
7b63b5305f35e3dd3f98cc7ff304cb634324931d
[ "tests/pseudo_test.py::test_main" ]
[ "tests/lexer_test.py::test_is_keyword", "tests/lexer_test.py::test_is_keyword_end", "tests/lexer_test.py::test_update_args", "tests/lexer_test.py::test_read_keyword", "tests/lexer_test.py::test_read_condition", "tests/lexer_test.py::test_read_args", "tests/lexer_test.py::test_read_expression", "tests/lexer_test.py::test_read_next", "tests/lexer_test.py::test_read_indent", "tests/lexer_test.py::test_read_indent_block", "tests/lexer_test.py::test_read_indent_size", "tests/pseudo_test.py::test_compile", "tests/type_test/statement_test.py::test_exit" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2019-05-05 16:33:47+00:00
mit
4,618
podhmo__swagger-marshmallow-codegen-74
diff --git a/swagger_marshmallow_codegen/codegen/config.py b/swagger_marshmallow_codegen/codegen/config.py index 429da80..56f8b2f 100644 --- a/swagger_marshmallow_codegen/codegen/config.py +++ b/swagger_marshmallow_codegen/codegen/config.py @@ -2,10 +2,11 @@ from __future__ import annotations import typing_extensions as tx -class ConfigDict(tx.TypedDict): +class ConfigDict(tx.TypedDict, total=False): schema: bool input: bool output: bool emit_schema_even_primitive_type: bool skip_header_comment: bool + header_comment: str diff --git a/swagger_marshmallow_codegen/codegen/v2/codegen.py b/swagger_marshmallow_codegen/codegen/v2/codegen.py index 568f432..ecf29f2 100644 --- a/swagger_marshmallow_codegen/codegen/v2/codegen.py +++ b/swagger_marshmallow_codegen/codegen/v2/codegen.py @@ -95,7 +95,9 @@ class SchemaWriter: logger.debug(" nested: %s, %s", caller_name, field_class_name) if opts: kwargs = LazyFormat(", {}", kwargs) - value = LazyFormat("{}(lambda: {}(){})", caller_name, field_class_name, kwargs) + value = LazyFormat( + "{}(lambda: {}(){})", caller_name, field_class_name, kwargs + ) else: if caller_name == "fields.Nested": caller_name = "fields.Field" @@ -488,9 +490,14 @@ class Codegen: def resolver(self) -> Resolver: return self.accessor.resolver - def write_header(self, c): - c.im.stmt("# -*- coding:utf-8 -*-") - c.im.stmt("# this is auto-generated by swagger-marshmallow-codegen") + def write_header(self, c, *, comment: t.Optional[str] = None): + if comment is None: + comment = """\ +# this is auto-generated by swagger-marshmallow-codegen +from __future__ import annotations +""" + for line in comment.splitlines(): + c.im.stmt(line) def write_import_(self, c): c.from_(*self.schema_class_path.rsplit(":", 1)) @@ -509,7 +516,7 @@ class Codegen: def codegen(self, d, ctx=None): c = ctx or Context() if not self.accessor.config.get("skip_header_comment", False): - self.write_header(c) + self.write_header(c, comment=self.accessor.config.get("header_comment")) c.m.sep() self.write_import_(c) self.write_body(c, d) diff --git a/swagger_marshmallow_codegen/codegen/v3/codegen.py b/swagger_marshmallow_codegen/codegen/v3/codegen.py index 4b8d214..0e27d70 100644 --- a/swagger_marshmallow_codegen/codegen/v3/codegen.py +++ b/swagger_marshmallow_codegen/codegen/v3/codegen.py @@ -1,1 +1,2 @@ from ..v2.codegen import Codegen +__all__ = ["Codegen"] diff --git a/swagger_marshmallow_codegen/dispatcher.py b/swagger_marshmallow_codegen/dispatcher.py index a5252be..2bf21f1 100644 --- a/swagger_marshmallow_codegen/dispatcher.py +++ b/swagger_marshmallow_codegen/dispatcher.py @@ -22,7 +22,7 @@ TYPE_MAP = { Pair(type="string", format=None): "marshmallow.fields:String", Pair(type="boolean", format=None): "marshmallow.fields:Boolean", Pair(type="string", format="uuid"): "marshmallow.fields:UUID", - Pair(type="string", format="date-time"): "marshmallow.fields:DateTime", + Pair(type="string", format="date-time"): "marshmallow.fields:AwareDateTime", Pair(type="string", format="date"): "marshmallow.fields:Date", Pair(type="string", format="time"): "marshmallow.fields:Time", Pair(type="string", format="email"): "marshmallow.fields:Email", diff --git a/swagger_marshmallow_codegen/resolver.py b/swagger_marshmallow_codegen/resolver.py index 844e9a2..50a350c 100644 --- a/swagger_marshmallow_codegen/resolver.py +++ b/swagger_marshmallow_codegen/resolver.py @@ -1,6 +1,5 @@ # -*- coding:utf-8 -*- import logging -import sys from collections import OrderedDict import dictknife from .langhelpers import titleize, normalize
podhmo/swagger-marshmallow-codegen
6d5dcfa88e8882a293434e3c3fcbf4837fd21c7d
diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00default.py b/swagger_marshmallow_codegen/tests/legacy_dst/00default.py index fc76808..090bfa1 100644 --- a/swagger_marshmallow_codegen/tests/legacy_dst/00default.py +++ b/swagger_marshmallow_codegen/tests/legacy_dst/00default.py @@ -10,7 +10,8 @@ class X(Schema): string = fields.String(missing=lambda: 'default') integer = fields.Integer(missing=lambda: 10) boolean = fields.Boolean(missing=lambda: True) - datetime = fields.DateTime(missing=lambda: datetime.datetime(2000, 1, 1, 1, 1, 1)) + datetime = fields.AwareDateTime(missing=lambda: datetime.datetime(2000, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc)) + date = fields.Date(missing=lambda: datetime.date(2000, 1, 1)) object = fields.Nested(lambda: XObject(), missing=lambda: OrderedDict([('name', 'foo'), ('age', 20)])) array = fields.List(fields.Integer(), missing=lambda: [1, 2, 3]) diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py b/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py index c71e243..0890aac 100644 --- a/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py +++ b/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py @@ -1,3 +1,4 @@ +# flake8: noqa from marshmallow import ( Schema, fields, diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py b/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py index b8effe3..3b4e48e 100644 --- a/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py +++ b/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py @@ -15,7 +15,7 @@ class Pet(Schema): name = fields.String(required=True, description="Pet's name", validate=[Length(min=1, max=100, equal=None)]) animal_type = fields.String(required=True, description='Kind of animal', validate=[Length(min=1, max=None, equal=None)]) tags = fields.Field(description='Custom tags') - created = fields.DateTime(description='Creation time', dump_only=True) + created = fields.AwareDateTime(description='Creation time', dump_only=True) class PetsInput: diff --git a/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml b/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml index df6f0e9..cad6cef 100644 --- a/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml +++ b/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml @@ -14,6 +14,10 @@ definitions: type: string format: date-time default: 2000-01-01T01:01:01Z + date: + type: string + format: date + default: 2000-01-01 object: type: object properties: diff --git a/swagger_marshmallow_codegen/tests/test_codegen_legacy.py b/swagger_marshmallow_codegen/tests/test_codegen_legacy.py index b3f67b9..d87dbd0 100644 --- a/swagger_marshmallow_codegen/tests/test_codegen_legacy.py +++ b/swagger_marshmallow_codegen/tests/test_codegen_legacy.py @@ -8,51 +8,53 @@ here = pathlib.Path(__file__).parent @pytest.mark.parametrize( - "src_file, dst_file", + "src_file, dst_file, header_comment", [ - ("./legacy_src/00person.yaml", "./legacy_dst/00person.py"), - ("./legacy_src/01person.yaml", "./legacy_dst/01person.py"), - ("./legacy_src/02person.yaml", "./legacy_dst/02person.py"), - ("./legacy_src/03person.yaml", "./legacy_dst/03person.py"), - ("./legacy_src/04person.yaml", "./legacy_dst/04person.py"), - ("./legacy_src/05person.yaml", "./legacy_dst/05person.py"), - ("./legacy_src/00commit.yaml", "./legacy_dst/00commit.py"), - ("./legacy_src/01commit.yaml", "./legacy_dst/01commit.py"), - ("./legacy_src/00emojis.yaml", "./legacy_dst/00emojis.py"), - ("./legacy_src/00stat.yaml", "./legacy_dst/00stat.py"), - 
("./legacy_src/00default.yaml", "./legacy_dst/00default.py"), - ("./legacy_src/00maximum.yaml", "./legacy_dst/00maximum.py"), - ("./legacy_src/00length.yaml", "./legacy_dst/00length.py"), - ("./legacy_src/00regex.yaml", "./legacy_dst/00regex.py"), - ("./legacy_src/00enum.yaml", "./legacy_dst/00enum.py"), - ("./legacy_src/00items.yaml", "./legacy_dst/00items.py"), - ("./legacy_src/00readonly.yaml", "./legacy_dst/00readonly.py"), - ("./legacy_src/00allOf.yaml", "./legacy_dst/00allOf.py"), - ("./legacy_src/00allOf2.yaml", "./legacy_dst/00allOf2.py"), - ("./legacy_src/01allOf2.yaml", "./legacy_dst/01allOf2.py"), - ("./legacy_src/02allOf2.yaml", "./legacy_dst/02allOf2.py"), - ("./legacy_src/00paths.yaml", "./legacy_dst/00paths.py"), - ("./legacy_src/01paths.yaml", "./legacy_dst/01paths.py"), - ("./legacy_src/02paths.yaml", "./legacy_dst/02paths.py"), - ("./legacy_src/03paths.yaml", "./legacy_dst/03paths.py"), - ("./legacy_src/00empty.yaml", "./legacy_dst/00empty.py"), - ("./legacy_src/01empty.yaml", "./legacy_dst/01empty.py"), + ("./legacy_src/00person.yaml", "./legacy_dst/00person.py", ""), + ("./legacy_src/01person.yaml", "./legacy_dst/01person.py", ""), + ("./legacy_src/02person.yaml", "./legacy_dst/02person.py", ""), + ("./legacy_src/03person.yaml", "./legacy_dst/03person.py", ""), + ("./legacy_src/04person.yaml", "./legacy_dst/04person.py", ""), + ("./legacy_src/05person.yaml", "./legacy_dst/05person.py", ""), + ("./legacy_src/00commit.yaml", "./legacy_dst/00commit.py", ""), + ("./legacy_src/01commit.yaml", "./legacy_dst/01commit.py", ""), + ("./legacy_src/00emojis.yaml", "./legacy_dst/00emojis.py", ""), + ("./legacy_src/00stat.yaml", "./legacy_dst/00stat.py", ""), + ("./legacy_src/00default.yaml", "./legacy_dst/00default.py", ""), + ("./legacy_src/00maximum.yaml", "./legacy_dst/00maximum.py", ""), + ("./legacy_src/00length.yaml", "./legacy_dst/00length.py", ""), + ("./legacy_src/00regex.yaml", "./legacy_dst/00regex.py", ""), + ("./legacy_src/00enum.yaml", "./legacy_dst/00enum.py", ""), + ("./legacy_src/00items.yaml", "./legacy_dst/00items.py", ""), + ("./legacy_src/00readonly.yaml", "./legacy_dst/00readonly.py", ""), + ("./legacy_src/00allOf.yaml", "./legacy_dst/00allOf.py", ""), + ("./legacy_src/00allOf2.yaml", "./legacy_dst/00allOf2.py", ""), + ("./legacy_src/01allOf2.yaml", "./legacy_dst/01allOf2.py", ""), + ("./legacy_src/02allOf2.yaml", "./legacy_dst/02allOf2.py", ""), + ("./legacy_src/00paths.yaml", "./legacy_dst/00paths.py", ""), + ("./legacy_src/01paths.yaml", "./legacy_dst/01paths.py", ""), + ("./legacy_src/02paths.yaml", "./legacy_dst/02paths.py", ""), + ("./legacy_src/03paths.yaml", "./legacy_dst/03paths.py", ""), + ("./legacy_src/00empty.yaml", "./legacy_dst/00empty.py", "# flake8: noqa"), + ("./legacy_src/01empty.yaml", "./legacy_dst/01empty.py", ""), ( "./legacy_src/00list_with_options.yaml", "./legacy_dst/00list_with_options.py", + "", ), - ("./legacy_src/00reserved.yaml", "./legacy_dst/00reserved.py"), - ("./legacy_src/00typearray.yaml", "./legacy_dst/00typearray.py"), - ("./legacy_src/00additional.yaml", "./legacy_dst/00additional.py"), - ("./legacy_src/01additional.yaml", "./legacy_dst/01additional.py"), - ("./legacy_src/00nullable.yaml", "./legacy_dst/00nullable.py"), - ("./legacy_src/00primitiveapi.yaml", "./legacy_dst/00primitiveapi.py"), + ("./legacy_src/00reserved.yaml", "./legacy_dst/00reserved.py", ""), + ("./legacy_src/00typearray.yaml", "./legacy_dst/00typearray.py", ""), + ("./legacy_src/00additional.yaml", "./legacy_dst/00additional.py", ""), + 
("./legacy_src/01additional.yaml", "./legacy_dst/01additional.py", ""), + ("./legacy_src/00nullable.yaml", "./legacy_dst/00nullable.py", ""), + ("./legacy_src/00primitiveapi.yaml", "./legacy_dst/00primitiveapi.py", ""), # ("./legacy_src/00patternProperties.yaml", "./legacy_dst/00patternProperties.py"), not supported yet ], ) -def test_v2( - src_file, - dst_file, +def test( + src_file: str, + dst_file: str, + header_comment: str, ): from swagger_marshmallow_codegen.lifting import lifting_definition from swagger_marshmallow_codegen.codegen import Context @@ -62,9 +64,8 @@ def test_v2( get_codegen().codegen( lifting_definition(d), - {"schema": True, "input": True, "output": True}, + {"schema": True, "input": True, "output": True, "header_comment": header_comment}, ctx=ctx, - test=True, ) expected = load_dstfile(dst_file, here=here).rstrip("\n")
CI is broken; DateTime field handling has changed. Refs https://github.com/marshmallow-code/marshmallow/issues/1234
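The dispatcher change in the patch above maps `format: date-time` to `marshmallow.fields:AwareDateTime`. A minimal sketch of the behaviour that motivates it, assuming marshmallow >= 3 (where `AwareDateTime` was introduced):

```python
# Hedged sketch: aware ISO datetime strings deserialize to tz-aware
# datetime objects with AwareDateTime, matching the generated schemas.
from marshmallow import Schema, fields

class X(Schema):
    created = fields.AwareDateTime()

print(X().load({"created": "2000-01-01T01:01:01+00:00"}))
# expected: {'created': datetime.datetime(2000, 1, 1, 1, 1, 1,
#            tzinfo=datetime.timezone.utc)}
```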
0.0
6d5dcfa88e8882a293434e3c3fcbf4837fd21c7d
[ "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00person.yaml-./legacy_dst/00person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01person.yaml-./legacy_dst/01person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02person.yaml-./legacy_dst/02person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/03person.yaml-./legacy_dst/03person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/04person.yaml-./legacy_dst/04person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/05person.yaml-./legacy_dst/05person.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00commit.yaml-./legacy_dst/00commit.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01commit.yaml-./legacy_dst/01commit.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00emojis.yaml-./legacy_dst/00emojis.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00stat.yaml-./legacy_dst/00stat.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00maximum.yaml-./legacy_dst/00maximum.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00length.yaml-./legacy_dst/00length.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00regex.yaml-./legacy_dst/00regex.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00enum.yaml-./legacy_dst/00enum.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00items.yaml-./legacy_dst/00items.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00readonly.yaml-./legacy_dst/00readonly.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00allOf.yaml-./legacy_dst/00allOf.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00allOf2.yaml-./legacy_dst/00allOf2.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01allOf2.yaml-./legacy_dst/01allOf2.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02allOf2.yaml-./legacy_dst/02allOf2.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00paths.yaml-./legacy_dst/00paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01paths.yaml-./legacy_dst/01paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02paths.yaml-./legacy_dst/02paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/03paths.yaml-./legacy_dst/03paths.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00empty.yaml-./legacy_dst/00empty.py-#", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01empty.yaml-./legacy_dst/01empty.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00list_with_options.yaml-./legacy_dst/00list_with_options.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00reserved.yaml-./legacy_dst/00reserved.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00typearray.yaml-./legacy_dst/00typearray.py-]", 
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00additional.yaml-./legacy_dst/00additional.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01additional.yaml-./legacy_dst/01additional.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00nullable.yaml-./legacy_dst/00nullable.py-]", "swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00primitiveapi.yaml-./legacy_dst/00primitiveapi.py-]" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2020-11-03 10:22:46+00:00
mit
4,619
poliastro__poliastro-248
diff --git a/src/poliastro/ephem.py b/src/poliastro/ephem.py index e24aebfc..279d0952 100644 --- a/src/poliastro/ephem.py +++ b/src/poliastro/ephem.py @@ -6,6 +6,10 @@ from astropy import units as u from astropy.coordinates import get_body_barycentric_posvel +class TimeScaleWarning(UserWarning): + pass + + def get_body_ephem(body, epoch): """Position and velocity vectors of a given body at a certain time. diff --git a/src/poliastro/twobody/orbit.py b/src/poliastro/twobody/orbit.py index 0bb0a2f7..daef456f 100644 --- a/src/poliastro/twobody/orbit.py +++ b/src/poliastro/twobody/orbit.py @@ -1,5 +1,4 @@ -# coding: utf-8 -from datetime import datetime +from warnings import warn import numpy as np @@ -8,7 +7,7 @@ from astropy import units as u from astropy import time from poliastro.constants import J2000 -from poliastro.ephem import get_body_ephem +from poliastro.ephem import get_body_ephem, TimeScaleWarning from poliastro.twobody.propagation import propagate import poliastro.twobody.rv @@ -125,7 +124,12 @@ class Orbit(object): """ if not epoch: - epoch = time.Time.now() + epoch = time.Time.now().tdb + elif epoch.scale != 'tdb': + epoch = epoch.tdb + warn("Input time was converted to scale='tdb' with value " + "{}. Use Time(..., scale='tdb') instead." + .format(epoch.tdb.value), TimeScaleWarning) r, v = get_body_ephem(body.name, epoch) return cls.from_vectors(body.parent, r, v, epoch)
poliastro/poliastro
057efa5aec406788246723b29224dc7a7f61b73a
diff --git a/src/poliastro/tests/tests_twobody/test_orbit.py b/src/poliastro/tests/tests_twobody/test_orbit.py index c818a5a3..c58ff2ca 100644 --- a/src/poliastro/tests/tests_twobody/test_orbit.py +++ b/src/poliastro/tests/tests_twobody/test_orbit.py @@ -1,4 +1,3 @@ -# coding: utf-8 import pytest from numpy.testing import assert_allclose @@ -9,6 +8,7 @@ from astropy.tests.helper import assert_quantity_allclose from astropy import time from poliastro.bodies import Sun, Earth +from poliastro.ephem import TimeScaleWarning from poliastro.twobody import Orbit from poliastro.constants import J2000 @@ -85,6 +85,17 @@ def test_orbit_from_ephem_with_no_epoch_is_today(): assert (time.Time.now() - ss.epoch).sec < 1 +def test_from_ephem_raises_warning_if_time_is_not_tdb_with_proper_time(recwarn): + body = Earth + epoch = time.Time("2017-09-29 07:31:26", scale="utc") + expected_epoch_string = "2017-09-29 07:32:35.182" # epoch.tdb.value + + Orbit.from_body_ephem(body, epoch) + + w = recwarn.pop(TimeScaleWarning) + assert expected_epoch_string in str(w.message) + + def test_circular_has_proper_semimajor_axis(): alt = 500 * u.km attractor = Earth
Add warning if input time for ephemeris is not TDB Those users that want utter precision might want to check against HORIZONS or SPICE. These systems use TDB as their input, so in the absence of time scale an implicit conversion is made, which makes use of the "wrong" time: https://space.stackexchange.com/q/22901 Example code: ``` In [17]: EPOCH = Time("2017-09-01 12:05:50") In [18]: Orbit.from_body_ephem(Venus, EPOCH) Out[18]: 1 x 1 AU x 24.4 deg orbit around Sun (☉) In [19]: _.epoch Out[19]: <Time object: scale='utc' format='iso' value=2017-09-01 12:05:50.000> In [20]: _.tdb Out[20]: <Time object: scale='tdb' format='iso' value=2017-09-01 12:06:59.183> In [21]: _19 == _20 Out[21]: True ``` I would raise a warning if the input scale is not TDB to avoid this confusion, and also set the epoch to TDB to signal this fact. Thoughts? @poliastro/development
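A minimal sketch of the convert-and-warn logic the patch above adds, written standalone rather than inside `Orbit.from_body_ephem` (plain `UserWarning` here; the patch uses poliastro's own `TimeScaleWarning`):

```python
# Hedged sketch of the TDB conversion warning, using astropy's time
# scale machinery; UTC and TDB differ by ~69 s at this epoch.
from warnings import warn
from astropy.time import Time

epoch = Time("2017-09-01 12:05:50", scale="utc")
if epoch.scale != "tdb":
    epoch = epoch.tdb  # convert, as the patch does
    warn("Input time was converted to scale='tdb' with value {}. "
         "Use Time(..., scale='tdb') instead.".format(epoch.value))
```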
0.0
057efa5aec406788246723b29224dc7a7f61b73a
[ "src/poliastro/tests/tests_twobody/test_orbit.py::test_default_time_for_new_state", "src/poliastro/tests/tests_twobody/test_orbit.py::test_state_raises_unitserror_if_elements_units_are_wrong", "src/poliastro/tests/tests_twobody/test_orbit.py::test_state_raises_unitserror_if_rv_units_are_wrong", "src/poliastro/tests/tests_twobody/test_orbit.py::test_parabolic_elements_fail_early", "src/poliastro/tests/tests_twobody/test_orbit.py::test_bad_inclination_raises_exception", "src/poliastro/tests/tests_twobody/test_orbit.py::test_apply_maneuver_changes_epoch", "src/poliastro/tests/tests_twobody/test_orbit.py::test_orbit_from_ephem_with_no_epoch_is_today", "src/poliastro/tests/tests_twobody/test_orbit.py::test_from_ephem_raises_warning_if_time_is_not_tdb_with_proper_time", "src/poliastro/tests/tests_twobody/test_orbit.py::test_circular_has_proper_semimajor_axis", "src/poliastro/tests/tests_twobody/test_orbit.py::test_geosync_has_proper_period", "src/poliastro/tests/tests_twobody/test_orbit.py::test_parabolic_has_proper_eccentricity", "src/poliastro/tests/tests_twobody/test_orbit.py::test_parabolic_has_zero_energy", "src/poliastro/tests/tests_twobody/test_orbit.py::test_pqw_for_circular_equatorial_orbit", "src/poliastro/tests/tests_twobody/test_orbit.py::test_orbit_representation" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-09-29 07:53:25+00:00
mit
4,620
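A minimal sketch of the time-scale guard that the poliastro record above patches in, assuming astropy is installed. `TimeScaleWarning` and the warning message mirror the patch; the `ensure_tdb` wrapper is a hypothetical name used only to keep the example self-contained:

```python
from warnings import warn

from astropy import time


class TimeScaleWarning(UserWarning):
    pass


def ensure_tdb(epoch=None):
    """Return an epoch on the TDB scale, warning on implicit conversion."""
    if epoch is None:
        # No epoch given: take "now", already converted to TDB.
        return time.Time.now().tdb
    if epoch.scale != "tdb":
        # Wrong scale: convert, but warn so precision-sensitive users notice.
        warn("Input time was converted to scale='tdb' with value "
             f"{epoch.tdb.value}. Use Time(..., scale='tdb') instead.",
             TimeScaleWarning)
        return epoch.tdb
    return epoch


# A UTC epoch triggers the warning and comes back roughly 69 s later in TDB,
# matching the "07:31:26" -> "07:32:35.182" shift the test above asserts.
epoch = ensure_tdb(time.Time("2017-09-29 07:31:26", scale="utc"))
```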
polygraph-python__polygraph-17
diff --git a/polygraph/types/basic_type.py b/polygraph/types/basic_type.py index 900110a..3498d26 100644 --- a/polygraph/types/basic_type.py +++ b/polygraph/types/basic_type.py @@ -3,6 +3,10 @@ from polygraph.types.definitions import TypeDefinition, TypeKind from polygraph.utils.trim_docstring import trim_docstring +def typedef(type_): + return type_.__type + + class PolygraphTypeMeta(type): def __new__(cls, name, bases, namespace): default_description = trim_docstring(namespace.get("__doc__", "")) @@ -17,7 +21,7 @@ class PolygraphTypeMeta(type): meta = None if meta: - namespace["_type"] = TypeDefinition( + namespace["__type"] = TypeDefinition( kind=getattr(meta, "kind"), name=getattr(meta, "name", name) or name, description=getattr(meta, "description", default_description), @@ -32,7 +36,7 @@ class PolygraphTypeMeta(type): return super(PolygraphTypeMeta, cls).__new__(cls, name, bases, namespace) def __str__(self): - return str(self._type.name) + return str(typedef(self).name) def __or__(self, other): """ @@ -99,7 +103,7 @@ class Union(PolygraphOutputType, PolygraphType): message = "All types must be subclasses of PolygraphType. Invalid values: "\ "{}".format(", ".join(bad_types)) raise PolygraphSchemaError(message) - type_names = [t._type.name for t in types] + type_names = [typedef(t).name for t in types] def __new_from_value__(cls, value): if not any(isinstance(value, t) for t in types): @@ -132,7 +136,7 @@ class List(PolygraphType): """ def __new__(cls, type_): - type_name = type_._type.name + type_name = typedef(type_).name def __new_from_value__(cls, value): if value is None: @@ -157,7 +161,7 @@ class NonNull(PolygraphType): Represents a type for which null is not a valid result. """ def __new__(cls, type_): - type_name = type_._type.name + type_name = typedef(type_).name if issubclass(type, NonNull): raise TypeError("NonNull cannot modify NonNull types")
polygraph-python/polygraph
a32d1a2545e146c26396ed6d8c2199deca50ca51
diff --git a/polygraph/types/tests/test_scalars.py b/polygraph/types/tests/test_scalars.py index 79ccae2..8eecd6a 100644 --- a/polygraph/types/tests/test_scalars.py +++ b/polygraph/types/tests/test_scalars.py @@ -1,5 +1,6 @@ from unittest import TestCase +from polygraph.types.basic_type import typedef from polygraph.types.scalar import ID, Boolean, Float, Int, String @@ -43,7 +44,7 @@ class BooleanTest(TestCase): def test_class_types(self): self.assertTrue(Boolean(True)) self.assertFalse(Boolean(False)) - self.assertEqual(Boolean._type.name, "Boolean") + self.assertEqual(typedef(Boolean).name, "Boolean") def test_none(self): self.assertIsNone(Boolean(None)) diff --git a/polygraph/types/tests/test_types.py b/polygraph/types/tests/test_types.py index 6dc1e57..aa4bd3a 100644 --- a/polygraph/types/tests/test_types.py +++ b/polygraph/types/tests/test_types.py @@ -1,19 +1,19 @@ from unittest import TestCase, skip from polygraph.exceptions import PolygraphValueError -from polygraph.types.basic_type import List, NonNull +from polygraph.types.basic_type import List, NonNull, typedef from polygraph.types.scalar import Boolean, Int, String from polygraph.utils.trim_docstring import trim_docstring class TypeMetaTest(TestCase): def test_scalar_meta(self): - self.assertEqual(Int._type.name, "Int") - self.assertEqual(Int._type.description, trim_docstring(Int.__doc__)) - self.assertEqual(String._type.name, "String") - self.assertEqual(String._type.description, trim_docstring(String.__doc__)) - self.assertEqual(Boolean._type.name, "Boolean") - self.assertEqual(Boolean._type.description, trim_docstring(Boolean.__doc__)) + self.assertEqual(typedef(Int).name, "Int") + self.assertEqual(typedef(Int).description, trim_docstring(Int.__doc__)) + self.assertEqual(typedef(String).name, "String") + self.assertEqual(typedef(String).description, trim_docstring(String.__doc__)) + self.assertEqual(typedef(Boolean).name, "Boolean") + self.assertEqual(typedef(Boolean).description, trim_docstring(Boolean.__doc__)) def test_type_string(self): self.assertEqual(str(Int), "Int") @@ -26,8 +26,8 @@ class TypeMetaTest(TestCase): """Not the description""" pass - self.assertEqual(FancyString._type.name, "String") - self.assertNotEqual(FancyString._type.description, "Not the description") + self.assertEqual(FancyString.__type.name, "String") + self.assertNotEqual(FancyString.__type.description, "Not the description") class NonNullTest(TestCase):
PolygraphType: Rename `_type` to `__type`
0.0
a32d1a2545e146c26396ed6d8c2199deca50ca51
[ "polygraph/types/tests/test_scalars.py::IntTest::test_class_types", "polygraph/types/tests/test_scalars.py::IntTest::test_none", "polygraph/types/tests/test_scalars.py::StringTest::test_class_types", "polygraph/types/tests/test_scalars.py::StringTest::test_none", "polygraph/types/tests/test_scalars.py::FloatTest::test_class_types", "polygraph/types/tests/test_scalars.py::BooleanTest::test_class_types", "polygraph/types/tests/test_scalars.py::BooleanTest::test_none", "polygraph/types/tests/test_scalars.py::IDTest::test_id_int", "polygraph/types/tests/test_scalars.py::IDTest::test_id_string", "polygraph/types/tests/test_types.py::TypeMetaTest::test_scalar_meta", "polygraph/types/tests/test_types.py::TypeMetaTest::test_type_string", "polygraph/types/tests/test_types.py::NonNullTest::test_cannot_have_nonnull_of_nonnull", "polygraph/types/tests/test_types.py::NonNullTest::test_nonnull_accepts_values", "polygraph/types/tests/test_types.py::NonNullTest::test_nonnull_doesnt_accept_none", "polygraph/types/tests/test_types.py::NonNullTest::test_string", "polygraph/types/tests/test_types.py::ListTest::test_list_of_nonnulls", "polygraph/types/tests/test_types.py::ListTest::test_scalar_list" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-04-16 12:39:42+00:00
mit
4,621
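A side note on why the `typedef(type_)` accessor in the record above can read the renamed attribute at all: Python's name mangling rewrites `__name` identifiers only inside class bodies, so a string dict key and a module-level lookup both see the literal name. `Meta` and `Demo` are hypothetical stand-ins:

```python
class Meta(type):
    def __new__(mcs, name, bases, namespace):
        # A string dict key is never mangled: the attribute really is
        # called "__type" on the finished class.
        namespace["__type"] = f"definition-of-{name}"
        return super().__new__(mcs, name, bases, namespace)


def typedef(type_):
    # Mangling only rewrites __names inside a *class* body, so this
    # module-level lookup sees the literal "__type" attribute.
    return type_.__type


class Demo(metaclass=Meta):
    pass


print(typedef(Demo))  # definition-of-Demo
```

Inside a class body the same access would compile to `type_._ClassName__type` and fail, which is presumably why the accessor lives at module level.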
polygraph-python__polygraph-19
diff --git a/polygraph/types/basic_type.py b/polygraph/types/basic_type.py index 3498d26..26e54b8 100644 --- a/polygraph/types/basic_type.py +++ b/polygraph/types/basic_type.py @@ -1,3 +1,5 @@ +from functools import wraps + from polygraph.exceptions import PolygraphSchemaError, PolygraphValueError from polygraph.types.definitions import TypeDefinition, TypeKind from polygraph.utils.trim_docstring import trim_docstring @@ -7,6 +9,22 @@ def typedef(type_): return type_.__type +type_builder_registry = {} + + +def type_builder_cache(method): + @wraps(method) + def wrapper(cls, *args): + unique_args = frozenset(args) + if (cls, unique_args) in type_builder_registry: + return type_builder_registry[(cls, unique_args)] + else: + return_val = method(cls, *args) + type_builder_registry[(cls, unique_args)] = return_val + return return_val + return wrapper + + class PolygraphTypeMeta(type): def __new__(cls, name, bases, namespace): default_description = trim_docstring(namespace.get("__doc__", "")) @@ -95,6 +113,7 @@ class Union(PolygraphOutputType, PolygraphType): GraphQL Object types, but provides for no guaranteed fields between those types. """ + @type_builder_cache def __new__(cls, *types): types = set(types) assert len(types) >= 2, "Unions must consist of more than 1 type" @@ -135,6 +154,7 @@ class List(PolygraphType): each item in the list is serialized as per the item type. """ + @type_builder_cache def __new__(cls, type_): type_name = typedef(type_).name @@ -160,6 +180,7 @@ class NonNull(PolygraphType): """ Represents a type for which null is not a valid result. """ + @type_builder_cache def __new__(cls, type_): type_name = typedef(type_).name
polygraph-python/polygraph
15e259607a85cddd45f2b0a44115b7d430970a25
diff --git a/polygraph/types/tests/test_union.py b/polygraph/types/tests/test_union.py index 0c60c7d..07abe3d 100644 --- a/polygraph/types/tests/test_union.py +++ b/polygraph/types/tests/test_union.py @@ -5,11 +5,13 @@ from polygraph.types.basic_type import Union from polygraph.types.scalar import Float, Int, String -@skip # FIXME +# @skip # FIXME class UnionTypeTest(TestCase): def test_commutativity(self): self.assertEqual(Union(String, Int), Union(Int, String)) + self.assertEqual(Union(String, Int, Float), Union(Float, String, Int, String)) + @skip def test_associativity(self): self.assertEqual( Union(Union(String, Int), Float), @@ -22,6 +24,7 @@ class UnionTypeTest(TestCase): Union(String, Int), ) + @skip def test_pipe_operator_with_more_than_two_types(self): self.assertEqual( String | Int | Float,
Implement GraphQL type cache for GraphQL meta-types
0.0
15e259607a85cddd45f2b0a44115b7d430970a25
[ "polygraph/types/tests/test_union.py::UnionTypeTest::test_commutativity", "polygraph/types/tests/test_union.py::UnionTypeTest::test_pipe_operator" ]
[ "polygraph/types/tests/test_union.py::UnionValueTest::test_valid_type", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_be_typed", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_have_right_type" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2017-04-16 13:19:00+00:00
mit
4,622
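The heart of the polygraph-19 patch above is a memoization decorator keyed on a `frozenset` of the builder arguments; a self-contained sketch (the `_registry` module dict mirrors `type_builder_registry` in the patch):

```python
from functools import wraps

_registry = {}


def type_builder_cache(method):
    @wraps(method)
    def wrapper(cls, *args):
        # frozenset makes the key order-free and hashable, so
        # Union(A, B) and Union(B, A) hit the same cache slot.
        key = (cls, frozenset(args))
        if key not in _registry:
            _registry[key] = method(cls, *args)
        return _registry[key]
    return wrapper
```

Keying on a set buys the commutativity the test asserts (`Union(String, Int) == Union(Int, String)`) and collapses duplicates, at the cost of losing argument order — fine for unions, but it would be wrong for a builder where order matters.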
polygraph-python__polygraph-24
diff --git a/polygraph/types/api.py b/polygraph/types/api.py new file mode 100644 index 0000000..8ffc397 --- /dev/null +++ b/polygraph/types/api.py @@ -0,0 +1,2 @@ +def typedef(type_): + return type_.__type diff --git a/polygraph/types/basic_type.py b/polygraph/types/basic_type.py index 26e54b8..6cbce15 100644 --- a/polygraph/types/basic_type.py +++ b/polygraph/types/basic_type.py @@ -1,30 +1,9 @@ -from functools import wraps - -from polygraph.exceptions import PolygraphSchemaError, PolygraphValueError +from polygraph.exceptions import PolygraphValueError +from polygraph.types.api import typedef from polygraph.types.definitions import TypeDefinition, TypeKind from polygraph.utils.trim_docstring import trim_docstring -def typedef(type_): - return type_.__type - - -type_builder_registry = {} - - -def type_builder_cache(method): - @wraps(method) - def wrapper(cls, *args): - unique_args = frozenset(args) - if (cls, unique_args) in type_builder_registry: - return type_builder_registry[(cls, unique_args)] - else: - return_val = method(cls, *args) - type_builder_registry[(cls, unique_args)] = return_val - return return_val - return wrapper - - class PolygraphTypeMeta(type): def __new__(cls, name, bases, namespace): default_description = trim_docstring(namespace.get("__doc__", "")) @@ -62,6 +41,7 @@ class PolygraphTypeMeta(type): > x = String | Int """ + from polygraph.types.type_builder import Union return Union(self, other) @@ -107,103 +87,6 @@ class Interface(PolygraphOutputType, PolygraphType): kind = TypeKind.INTERFACE -class Union(PolygraphOutputType, PolygraphType): - """ - GraphQL Unions represent an object that could be one of a list of - GraphQL Object types, but provides for no guaranteed fields between - those types. - """ - @type_builder_cache - def __new__(cls, *types): - types = set(types) - assert len(types) >= 2, "Unions must consist of more than 1 type" - bad_types = [t for t in types if not issubclass(t, PolygraphType)] - if bad_types: - message = "All types must be subclasses of PolygraphType. Invalid values: "\ - "{}".format(", ".join(bad_types)) - raise PolygraphSchemaError(message) - type_names = [typedef(t).name for t in types] - - def __new_from_value__(cls, value): - if not any(isinstance(value, t) for t in types): - valid_types = ", ".join(type_names) - message = "{} is an invalid value type. "\ - "Valid types: {}".format(type(value), valid_types) - raise PolygraphValueError(message) - return value - - class Type: - name = "|".join(type_names) - description = "One of {}".format(", ".join(type_names)) - possible_types = types - kind = TypeKind.UNION - - name = "Union__" + "_".join(type_names) - bases = (Union, ) - attrs = {"__new__": __new_from_value__} - return type(name, bases, attrs) - - -class List(PolygraphType): - """ - A GraphQL list is a special collection type which declares the - type of each item in the List (referred to as the item type of - the list). - - List values are serialized as ordered lists, where - each item in the list is serialized as per the item type. 
- """ - - @type_builder_cache - def __new__(cls, type_): - type_name = typedef(type_).name - - def __new_from_value__(cls, value): - if value is None: - return None - ret_val = [type_(v) for v in value] - return list.__new__(cls, ret_val) - - class Type: - name = "[{}]".format(type_name) - description = "A list of {}".format(type_name) - kind = TypeKind.LIST - of_type = type_ - - name = "List__" + type_name - bases = (List, list) - attrs = {"__new__": __new_from_value__, "Type": Type} - return type(name, bases, attrs) - - -class NonNull(PolygraphType): - """ - Represents a type for which null is not a valid result. - """ - @type_builder_cache - def __new__(cls, type_): - type_name = typedef(type_).name - - if issubclass(type, NonNull): - raise TypeError("NonNull cannot modify NonNull types") - - class Type: - name = type_name + "!" - description = "A non-nullable version of {}".format(type_name) - kind = TypeKind.NON_NULL - of_type = type_ - - def __new_from_value__(cls, value): - if value is None: - raise PolygraphValueError("Non-nullable value cannot be None") - return type_.__new__(cls, value) - - name = "NonNull__" + type_name - bases = (NonNull, type_, ) - attrs = {"__new__": __new_from_value__, "Type": Type} - return type(name, bases, attrs) - - class InputObject(PolygraphInputType, PolygraphType): """ An Input Object defines a set of input fields; the input fields diff --git a/polygraph/types/type_builder.py b/polygraph/types/type_builder.py new file mode 100644 index 0000000..943b40d --- /dev/null +++ b/polygraph/types/type_builder.py @@ -0,0 +1,137 @@ +from functools import wraps + +from polygraph.exceptions import PolygraphSchemaError, PolygraphValueError +from polygraph.types.api import typedef +from polygraph.types.basic_type import PolygraphOutputType, PolygraphType +from polygraph.types.definitions import TypeKind +from polygraph.utils.deduplicate import deduplicate + + +type_builder_registry = {} + + +def flatten(iterable): + for arg in iterable: + if issubclass(arg, Union): + yield from flatten(arg.__type.possible_types) + else: + yield arg + + +def deduplicate_union_args(method): + @wraps(method) + def wrapper(cls, *types): + types = list(deduplicate(flatten(types))) + return method(cls, *types) + return wrapper + + +def type_builder_cache(method): + @wraps(method) + def wrapper(cls, *args): + unique_args = frozenset(args) + if (cls, unique_args) in type_builder_registry: + return type_builder_registry[(cls, unique_args)] + else: + return_val = method(cls, *args) + type_builder_registry[(cls, unique_args)] = return_val + return return_val + return wrapper + + +class Union(PolygraphOutputType, PolygraphType): + """ + GraphQL Unions represent an object that could be one of a list of + GraphQL Object types, but provides for no guaranteed fields between + those types. + """ + + @deduplicate_union_args + @type_builder_cache + def __new__(cls, *types): + assert len(types) >= 2, "Unions must consist of more than 1 type" + bad_types = [t for t in types if not issubclass(t, PolygraphType)] + if bad_types: + message = "All types must be subclasses of PolygraphType. Invalid values: "\ + "{}".format(", ".join(bad_types)) + raise PolygraphSchemaError(message) + type_names = [typedef(t).name for t in types] + + def __new_from_value__(cls, value): + if not any(isinstance(value, t) for t in types): + valid_types = ", ".join(type_names) + message = "{} is an invalid value type. 
"\ + "Valid types: {}".format(type(value), valid_types) + raise PolygraphValueError(message) + return value + + class Type: + name = "|".join(type_names) + description = "One of {}".format(", ".join(type_names)) + possible_types = types + kind = TypeKind.UNION + + name = "Union__" + "_".join(type_names) + bases = (Union, ) + attrs = {"__new__": __new_from_value__, "Type": Type} + return type(name, bases, attrs) + + +class List(PolygraphType): + """ + A GraphQL list is a special collection type which declares the + type of each item in the List (referred to as the item type of + the list). + + List values are serialized as ordered lists, where + each item in the list is serialized as per the item type. + """ + + @type_builder_cache + def __new__(cls, type_): + type_name = typedef(type_).name + + def __new_from_value__(cls, value): + if value is None: + return None + ret_val = [type_(v) for v in value] + return list.__new__(cls, ret_val) + + class Type: + name = "[{}]".format(type_name) + description = "A list of {}".format(type_name) + kind = TypeKind.LIST + of_type = type_ + + name = "List__" + type_name + bases = (List, list) + attrs = {"__new__": __new_from_value__, "Type": Type} + return type(name, bases, attrs) + + +class NonNull(PolygraphType): + """ + Represents a type for which null is not a valid result. + """ + @type_builder_cache + def __new__(cls, type_): + type_name = typedef(type_).name + + if issubclass(type, NonNull): + raise TypeError("NonNull cannot modify NonNull types") + + class Type: + name = type_name + "!" + description = "A non-nullable version of {}".format(type_name) + kind = TypeKind.NON_NULL + of_type = type_ + + def __new_from_value__(cls, value): + if value is None: + raise PolygraphValueError("Non-nullable value cannot be None") + return type_.__new__(cls, value) + + name = "NonNull__" + type_name + bases = (NonNull, type_, ) + attrs = {"__new__": __new_from_value__, "Type": Type} + return type(name, bases, attrs) diff --git a/polygraph/utils/deduplicate.py b/polygraph/utils/deduplicate.py new file mode 100644 index 0000000..1554394 --- /dev/null +++ b/polygraph/utils/deduplicate.py @@ -0,0 +1,11 @@ +def deduplicate(iterable): + """ + Yields deduplicated values, in original order. + + The values of iterable must be hashable + """ + seen = set() + for val in iterable: + if val not in seen: + seen.add(val) + yield val
polygraph-python/polygraph
67ecab9c9c8f6f0ade8e4a55ed5b2c2e66bc9eae
diff --git a/polygraph/types/tests/test_object_type.py b/polygraph/types/tests/test_object_type.py index e328e6f..9ffd4d4 100644 --- a/polygraph/types/tests/test_object_type.py +++ b/polygraph/types/tests/test_object_type.py @@ -1,10 +1,10 @@ from unittest import TestCase from polygraph.exceptions import PolygraphValueError -from polygraph.types.basic_type import NonNull from polygraph.types.decorators import field from polygraph.types.object_type import ObjectType from polygraph.types.scalar import Int, String +from polygraph.types.type_builder import NonNull class HelloWorldObject(ObjectType): diff --git a/polygraph/types/tests/test_scalars.py b/polygraph/types/tests/test_scalars.py index 8eecd6a..1097619 100644 --- a/polygraph/types/tests/test_scalars.py +++ b/polygraph/types/tests/test_scalars.py @@ -1,6 +1,6 @@ from unittest import TestCase -from polygraph.types.basic_type import typedef +from polygraph.types.api import typedef from polygraph.types.scalar import ID, Boolean, Float, Int, String diff --git a/polygraph/types/tests/test_type_definitions.py b/polygraph/types/tests/test_type_definitions.py new file mode 100644 index 0000000..6e67345 --- /dev/null +++ b/polygraph/types/tests/test_type_definitions.py @@ -0,0 +1,27 @@ +from unittest import TestCase + +from polygraph.types.api import typedef +from polygraph.types.scalar import Float, Int, String +from polygraph.types.type_builder import List, NonNull, Union + + +class TestTypeDefinition(TestCase): + + def test_names_of_scalars(self): + type_names = [ + (String, "String"), + (Int, "Int"), + (Float, "Float"), + ] + for type_, name in type_names: + self.assertEqual(typedef(type_).name, name) + + def test_names_of_built_types(self): + type_names = [ + (List(String), "[String]"), + (Union(Int, String, List(String)), "Int|String|[String]"), + (NonNull(Int), "Int!"), + (NonNull(List(String)), "[String]!") + ] + for type_, name in type_names: + self.assertEqual(typedef(type_).name, name) diff --git a/polygraph/types/tests/test_types.py b/polygraph/types/tests/test_types.py index aa4bd3a..d08d30f 100644 --- a/polygraph/types/tests/test_types.py +++ b/polygraph/types/tests/test_types.py @@ -1,8 +1,9 @@ from unittest import TestCase, skip from polygraph.exceptions import PolygraphValueError -from polygraph.types.basic_type import List, NonNull, typedef +from polygraph.types.api import typedef from polygraph.types.scalar import Boolean, Int, String +from polygraph.types.type_builder import List, NonNull from polygraph.utils.trim_docstring import trim_docstring diff --git a/polygraph/types/tests/test_union.py b/polygraph/types/tests/test_union.py index 07abe3d..b04ac5e 100644 --- a/polygraph/types/tests/test_union.py +++ b/polygraph/types/tests/test_union.py @@ -1,15 +1,15 @@ from unittest import TestCase, skip from polygraph.exceptions import PolygraphValueError -from polygraph.types.basic_type import Union +from polygraph.types.type_builder import Union from polygraph.types.scalar import Float, Int, String -# @skip # FIXME class UnionTypeTest(TestCase): + def test_commutativity(self): self.assertEqual(Union(String, Int), Union(Int, String)) - self.assertEqual(Union(String, Int, Float), Union(Float, String, Int, String)) + self.assertEqual(Union(String, Int, Float), Union(Float, String, Int)) @skip def test_associativity(self): @@ -24,7 +24,6 @@ class UnionTypeTest(TestCase): Union(String, Int), ) - @skip def test_pipe_operator_with_more_than_two_types(self): self.assertEqual( String | Int | Float, diff --git 
a/polygraph/utils/tests/test_deduplicate.py b/polygraph/utils/tests/test_deduplicate.py new file mode 100644 index 0000000..5d0761b --- /dev/null +++ b/polygraph/utils/tests/test_deduplicate.py @@ -0,0 +1,11 @@ +from polygraph.utils.deduplicate import deduplicate +from unittest import TestCase + + +class DeduplicateTest(TestCase): + def test_deduplicate(self): + args = ['d', 'e', 'd', 'u', 'p', 'l', 'i', 'c', 'a', 't', 'e'] + self.assertEqual( + list(deduplicate(args)), + ['d', 'e', 'u', 'p', 'l', 'i', 'c', 'a', 't'], + )
`Union(Union(String, Int), Float)` should be the same as `Union(String, Int, Float)`
0.0
67ecab9c9c8f6f0ade8e4a55ed5b2c2e66bc9eae
[ "polygraph/types/tests/test_object_type.py::SimpleObjectTypeTest::test_bad_resolver", "polygraph/types/tests/test_object_type.py::SimpleObjectTypeTest::test_bare_resolver", "polygraph/types/tests/test_object_type.py::SimpleObjectTypeTest::test_resolver_argument", "polygraph/types/tests/test_scalars.py::IntTest::test_class_types", "polygraph/types/tests/test_scalars.py::IntTest::test_none", "polygraph/types/tests/test_scalars.py::StringTest::test_class_types", "polygraph/types/tests/test_scalars.py::StringTest::test_none", "polygraph/types/tests/test_scalars.py::FloatTest::test_class_types", "polygraph/types/tests/test_scalars.py::BooleanTest::test_class_types", "polygraph/types/tests/test_scalars.py::BooleanTest::test_none", "polygraph/types/tests/test_scalars.py::IDTest::test_id_int", "polygraph/types/tests/test_scalars.py::IDTest::test_id_string", "polygraph/types/tests/test_type_definitions.py::TestTypeDefinition::test_names_of_built_types", "polygraph/types/tests/test_type_definitions.py::TestTypeDefinition::test_names_of_scalars", "polygraph/types/tests/test_types.py::TypeMetaTest::test_scalar_meta", "polygraph/types/tests/test_types.py::TypeMetaTest::test_type_string", "polygraph/types/tests/test_types.py::NonNullTest::test_cannot_have_nonnull_of_nonnull", "polygraph/types/tests/test_types.py::NonNullTest::test_nonnull_accepts_values", "polygraph/types/tests/test_types.py::NonNullTest::test_nonnull_doesnt_accept_none", "polygraph/types/tests/test_types.py::NonNullTest::test_string", "polygraph/types/tests/test_types.py::ListTest::test_list_of_nonnulls", "polygraph/types/tests/test_types.py::ListTest::test_scalar_list", "polygraph/types/tests/test_union.py::UnionTypeTest::test_commutativity", "polygraph/types/tests/test_union.py::UnionTypeTest::test_pipe_operator", "polygraph/types/tests/test_union.py::UnionTypeTest::test_pipe_operator_with_more_than_two_types", "polygraph/types/tests/test_union.py::UnionValueTest::test_valid_type", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_be_typed", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_have_right_type", "polygraph/utils/tests/test_deduplicate.py::DeduplicateTest::test_deduplicate" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files" ], "has_test_patch": true, "is_lite": false }
2017-04-17 21:15:38+00:00
mit
4,623
polygraph-python__polygraph-31
diff --git a/polygraph/types/object_type.py b/polygraph/types/object_type.py index 9567e1b..b8e0a82 100644 --- a/polygraph/types/object_type.py +++ b/polygraph/types/object_type.py @@ -8,5 +8,8 @@ class ObjectType(PolygraphOutputType, PolygraphType, dict): a value of a specific type. """ + def __init__(self, root=None): + self.root = root + class Type: kind = TypeKind.OBJECT
polygraph-python/polygraph
b95e00dbc6ba9738fcebf97e9a0bdab04fcd5fbd
diff --git a/polygraph/types/tests/test_object_type.py b/polygraph/types/tests/test_object_type.py index 5e380b3..6bb7747 100644 --- a/polygraph/types/tests/test_object_type.py +++ b/polygraph/types/tests/test_object_type.py @@ -1,3 +1,4 @@ +from types import SimpleNamespace from unittest import TestCase from polygraph.exceptions import PolygraphValueError @@ -51,3 +52,53 @@ class SimpleObjectTypeTest(TestCase): hello_world = HelloWorldObject() with self.assertRaises(PolygraphValueError): hello_world.bad_resolver() + + +class ObjectResolver(ObjectType): + @field() + def name(self) -> NonNull(String): + return self.full_name() + + @field() + def age_in_2017(self) -> NonNull(Int): + return 2017 - self.root.birthyear + + @field() + def always_none(self) -> String: + return self.root.address + + @field() + def greeting(self) -> HelloWorldObject: + return HelloWorldObject() + + def full_name(self): + return self.root.first_name + " " + self.root.last_name + + +class ObjectResolverTest(TestCase): + def setUp(self): + obj = SimpleNamespace( + first_name="John", + last_name="Smith", + birthyear=2000, + address=None, + ) + self.object_type = ObjectResolver(obj) + + def test_method_is_not_automatically_field(self): + type_info = typedef(self.object_type) + fields = set([f.name for f in type_info.fields]) + self.assertEqual( + fields, + set(["name", "age_in_2017", "always_none", "greeting"]), + ) + self.assertNotIn("full_name", fields) + + def test_simple_resolver(self): + self.assertEqual(self.object_type.name(), "John Smith") + self.assertEqual(self.object_type.age_in_2017(), 17) + self.assertEqual(self.object_type.always_none(), None) + + def test_resolve_to_object(self): + greeting = self.object_type.greeting() + self.assertEqual(greeting.greet_world(), "Hello world!")
ObjectType: resolve from Python object or dict
0.0
b95e00dbc6ba9738fcebf97e9a0bdab04fcd5fbd
[ "polygraph/types/tests/test_object_type.py::ObjectResolverTest::test_method_is_not_automatically_field", "polygraph/types/tests/test_object_type.py::ObjectResolverTest::test_resolve_to_object", "polygraph/types/tests/test_object_type.py::ObjectResolverTest::test_simple_resolver" ]
[ "polygraph/types/tests/test_object_type.py::SimpleObjectTypeTest::test_bad_resolver", "polygraph/types/tests/test_object_type.py::SimpleObjectTypeTest::test_bare_resolver", "polygraph/types/tests/test_object_type.py::SimpleObjectTypeTest::test_object_type_definition", "polygraph/types/tests/test_object_type.py::SimpleObjectTypeTest::test_resolver_argument" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2017-04-17 22:32:07+00:00
mit
4,624
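The polygraph-31 test above exercises a common resolver pattern: the object type wraps a raw `root` value and its methods read fields off it. A hypothetical `PersonView` shows the shape, using `types.SimpleNamespace` as the test does:

```python
from types import SimpleNamespace


class PersonView:
    def __init__(self, root=None):
        self.root = root

    def name(self):
        return self.root.first_name + " " + self.root.last_name

    def age_in_2017(self):
        return 2017 - self.root.birthyear


view = PersonView(SimpleNamespace(first_name="John", last_name="Smith",
                                  birthyear=2000))
print(view.name(), view.age_in_2017())  # John Smith 17
```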
polygraph-python__polygraph-34
diff --git a/polygraph/types/basic_type.py b/polygraph/types/basic_type.py index 7e889d1..20dc9b6 100644 --- a/polygraph/types/basic_type.py +++ b/polygraph/types/basic_type.py @@ -1,6 +1,6 @@ from polygraph.exceptions import PolygraphValueError from polygraph.types.api import typedef -from polygraph.types.definitions import TypeDefinition, TypeKind +from polygraph.types.definitions import EnumValue, TypeDefinition, TypeKind from polygraph.utils.trim_docstring import trim_docstring @@ -12,6 +12,12 @@ def get_field_list(namespace): ] +def get_enum_value_list(namespace): + return [ + value for value in namespace.values() if isinstance(value, EnumValue) + ] + + class PolygraphTypeMeta(type): def __new__(cls, name, bases, namespace): default_description = trim_docstring(namespace.get("__doc__", "")) @@ -33,7 +39,7 @@ class PolygraphTypeMeta(type): fields=get_field_list(namespace), possible_types=getattr(meta, "possible_types", None), interfaces=None, # FIXME - enum_values=None, # FIXME + enum_values=get_enum_value_list(namespace), input_fields=None, # FIXME of_type=getattr(meta, "of_type", None) ) diff --git a/polygraph/types/definitions.py b/polygraph/types/definitions.py index 1d0008a..5943fc1 100644 --- a/polygraph/types/definitions.py +++ b/polygraph/types/definitions.py @@ -29,6 +29,20 @@ Field = namedtuple( ) +class EnumValue: + __slots__ = ["name", "description", "is_deprecated", "deprecation_reason", "parent"] + + def __init__(self, name, parent, description=None, deprecation_reason=None): + self.name = name + self.description = description + self.is_deprecated = bool(deprecation_reason) + self.deprecation_reason = deprecation_reason + self.parent = parent + + def __repr__(self): + return "EnumValue('{}')".format(self.name) + + TypeDefinition = namedtuple( "TypeDefinition", [ diff --git a/polygraph/types/enum.py b/polygraph/types/enum.py index 42efa7f..110d0ff 100644 --- a/polygraph/types/enum.py +++ b/polygraph/types/enum.py @@ -4,21 +4,7 @@ from polygraph.types.basic_type import ( PolygraphOutputType, PolygraphTypeMeta, ) -from polygraph.types.definitions import TypeKind - - -class EnumValue: - __slots__ = ["name", "description", "is_deprecated", "deprecation_reason", "parent"] - - def __init__(self, name, parent, description=None, deprecation_reason=None): - self.name = name - self.description = description - self.is_deprecated = bool(deprecation_reason) - self.deprecation_reason = deprecation_reason - self.parent = parent - - def __repr__(self): - return "EnumValue('{}')".format(self.name) +from polygraph.types.definitions import EnumValue, TypeKind class EnumTypeMeta(PolygraphTypeMeta):
polygraph-python/polygraph
ad747c9e64f551b9a8b0392994675a4bb79de4a7
diff --git a/polygraph/types/tests/test_enum.py b/polygraph/types/tests/test_enum.py index c59940c..dabba73 100644 --- a/polygraph/types/tests/test_enum.py +++ b/polygraph/types/tests/test_enum.py @@ -1,6 +1,7 @@ from unittest import TestCase from polygraph.exceptions import PolygraphValueError +from polygraph.types.api import typedef from polygraph.types.enum import EnumType @@ -10,6 +11,12 @@ class Colours(EnumType): BLUE = "The colour of sloth" +class Shapes(EnumType): + RECTANGLE = "A quadrangle" + SQUARE = "Also a quadrangle" + RHOMBUS = "Yet another quadrangle" + + class EnumTest(TestCase): def test_simple_enum(self): @@ -29,3 +36,14 @@ class EnumTest(TestCase): self.assertEqual(Colours(Colours.RED), Colours.RED) with self.assertRaises(PolygraphValueError): Colours("RED") + + def test_enum_values_dont_mix(self): + with self.assertRaises(PolygraphValueError): + Colours(Shapes.RECTANGLE) + + with self.assertRaises(PolygraphValueError): + Shapes(Colours.BLUE) + + def test_enum_type(self): + colour_type = typedef(Colours) + self.assertEqual(len(colour_type.enum_values), 3)
Populate EnumValue field for Enum type definition
0.0
ad747c9e64f551b9a8b0392994675a4bb79de4a7
[ "polygraph/types/tests/test_enum.py::EnumTest::test_enum_type" ]
[ "polygraph/types/tests/test_enum.py::EnumTest::test_enum_value", "polygraph/types/tests/test_enum.py::EnumTest::test_enum_values_dont_mix", "polygraph/types/tests/test_enum.py::EnumTest::test_simple_enum" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2017-04-18 12:15:56+00:00
mit
4,625
polygraph-python__polygraph-43
diff --git a/polygraph/types/type_builder.py b/polygraph/types/type_builder.py index 2b20577..fcce7f3 100644 --- a/polygraph/types/type_builder.py +++ b/polygraph/types/type_builder.py @@ -2,7 +2,11 @@ from functools import wraps from polygraph.exceptions import PolygraphSchemaError, PolygraphValueError from polygraph.types.api import typedef -from polygraph.types.basic_type import PolygraphOutputType, PolygraphType +from polygraph.types.basic_type import ( + PolygraphOutputType, + PolygraphType, + PolygraphTypeMeta, +) from polygraph.types.definitions import TypeKind from polygraph.utils.deduplicate import deduplicate @@ -39,7 +43,15 @@ def type_builder_cache(method): return wrapper -class Union(PolygraphOutputType, PolygraphType): +class TypeBuilderMeta(PolygraphTypeMeta): + def __getitem__(self, value): + try: + return self.__new__(self, *value) + except TypeError: + return self.__new__(self, value) + + +class Union(PolygraphOutputType, PolygraphType, metaclass=TypeBuilderMeta): """ GraphQL Unions represent an object that could be one of a list of GraphQL Object types, but provides for no guaranteed fields between @@ -77,7 +89,7 @@ class Union(PolygraphOutputType, PolygraphType): return type(name, bases, attrs) -class List(PolygraphOutputType, PolygraphType): +class List(PolygraphOutputType, PolygraphType, metaclass=TypeBuilderMeta): """ A GraphQL list is a special collection type which declares the type of each item in the List (referred to as the item type of @@ -109,7 +121,7 @@ class List(PolygraphOutputType, PolygraphType): return type(name, bases, attrs) -class NonNull(PolygraphType): +class NonNull(PolygraphType, metaclass=TypeBuilderMeta): """ Represents a type for which null is not a valid result. """
polygraph-python/polygraph
f0897942b1fb6d6412a81852646eb94697d4632f
diff --git a/polygraph/types/tests/helper.py b/polygraph/types/tests/helper.py new file mode 100644 index 0000000..59d17f2 --- /dev/null +++ b/polygraph/types/tests/helper.py @@ -0,0 +1,23 @@ +from polygraph.types.decorators import field +from polygraph.types.object_type import ObjectType +from polygraph.types.scalar import Int, String + + +class Person(ObjectType): + @field() + def name(self) -> String: + pass + + @field() + def age(self) -> Int: + pass + + +class Animal(ObjectType): + @field() + def name(self) -> String: + pass + + @field() + def sound(self) -> String: + pass diff --git a/polygraph/types/tests/test_types.py b/polygraph/types/tests/test_types.py index d08d30f..cb6bf0a 100644 --- a/polygraph/types/tests/test_types.py +++ b/polygraph/types/tests/test_types.py @@ -50,6 +50,9 @@ class NonNullTest(TestCase): with self.assertRaises(TypeError): NonNull(NonNullString) + def test_square_bracket_notation(self): + self.assertEqual(NonNull(String), NonNull[String]) + class ListTest(TestCase): @@ -67,3 +70,6 @@ class ListTest(TestCase): self.assertEqual(string_list(["a", "b", "c"]), ["a", "b", "c"]) with self.assertRaises(PolygraphValueError): string_list(["a", "b", "c", None]) + + def test_square_bracket_notation(self): + self.assertEqual(List(Int), List[Int]) diff --git a/polygraph/types/tests/test_union.py b/polygraph/types/tests/test_union.py index e98ba92..1150486 100644 --- a/polygraph/types/tests/test_union.py +++ b/polygraph/types/tests/test_union.py @@ -1,17 +1,23 @@ -from unittest import TestCase, skip +from unittest import TestCase from polygraph.exceptions import PolygraphValueError from polygraph.types.scalar import Float, Int, String +from polygraph.types.tests.helper import Animal, Person from polygraph.types.type_builder import Union class UnionTypeTest(TestCase): + def test_square_bracket_notation(self): + self.assertEqual( + Union(Person, Animal), + Union[Person, Animal], + ) + def test_commutativity(self): self.assertEqual(Union(String, Int), Union(Int, String)) self.assertEqual(Union(String, Int, Float), Union(Float, String, Int)) - @skip def test_associativity(self): self.assertEqual( Union(Union(String, Int), Float),
Use square brackets for wrapping types, e.g. `Union[X, Y]`
0.0
f0897942b1fb6d6412a81852646eb94697d4632f
[ "polygraph/types/tests/test_types.py::NonNullTest::test_square_bracket_notation", "polygraph/types/tests/test_types.py::ListTest::test_square_bracket_notation", "polygraph/types/tests/test_union.py::UnionTypeTest::test_square_bracket_notation" ]
[ "polygraph/types/tests/test_types.py::TypeMetaTest::test_scalar_meta", "polygraph/types/tests/test_types.py::TypeMetaTest::test_type_string", "polygraph/types/tests/test_types.py::NonNullTest::test_cannot_have_nonnull_of_nonnull", "polygraph/types/tests/test_types.py::NonNullTest::test_nonnull_accepts_values", "polygraph/types/tests/test_types.py::NonNullTest::test_nonnull_doesnt_accept_none", "polygraph/types/tests/test_types.py::NonNullTest::test_string", "polygraph/types/tests/test_types.py::ListTest::test_list_of_nonnulls", "polygraph/types/tests/test_types.py::ListTest::test_scalar_list", "polygraph/types/tests/test_union.py::UnionTypeTest::test_associativity", "polygraph/types/tests/test_union.py::UnionTypeTest::test_commutativity", "polygraph/types/tests/test_union.py::UnionTypeTest::test_pipe_operator", "polygraph/types/tests/test_union.py::UnionTypeTest::test_pipe_operator_with_more_than_two_types", "polygraph/types/tests/test_union.py::UnionValueTest::test_valid_type", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_be_typed", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_have_right_type" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2017-04-18 14:53:53+00:00
mit
4,626
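The square-bracket notation in the polygraph-43 record above works because subscribing a class invokes `__getitem__` on its *metaclass*. The patch unpacks tuple arguments with a `try/except TypeError`; the sketch below uses the equivalent `isinstance` check, with hypothetical `BuilderMeta`/`Pair` names:

```python
class BuilderMeta(type):
    def __getitem__(cls, value):
        # Pair[int, str] passes a tuple; Pair[int] passes the bare value.
        if isinstance(value, tuple):
            return cls(*value)
        return cls(value)


class Pair(metaclass=BuilderMeta):
    def __init__(self, *types):
        self.types = types


assert Pair[int, str].types == Pair(int, str).types == (int, str)
assert Pair[int].types == (int,)
```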
polygraph-python__polygraph-63
diff --git a/polygraph/types/definitions.py b/polygraph/types/definitions.py index 5943fc1..dcec78e 100644 --- a/polygraph/types/definitions.py +++ b/polygraph/types/definitions.py @@ -32,12 +32,11 @@ Field = namedtuple( class EnumValue: __slots__ = ["name", "description", "is_deprecated", "deprecation_reason", "parent"] - def __init__(self, name, parent, description=None, deprecation_reason=None): + def __init__(self, description=None, name=None, deprecation_reason=None): self.name = name self.description = description self.is_deprecated = bool(deprecation_reason) self.deprecation_reason = deprecation_reason - self.parent = parent def __repr__(self): return "EnumValue('{}')".format(self.name) diff --git a/polygraph/types/enum.py b/polygraph/types/enum.py index 110d0ff..de76f83 100644 --- a/polygraph/types/enum.py +++ b/polygraph/types/enum.py @@ -9,10 +9,10 @@ from polygraph.types.definitions import EnumValue, TypeKind class EnumTypeMeta(PolygraphTypeMeta): def __new__(cls, name, bases, namespace): - for key, desc in namespace.items(): - if not key.startswith("_") and key != "Type": - desc = namespace.get(key) - namespace[key] = EnumValue(name=key, description=desc, parent=name) + for key, value in namespace.items(): + if type(value) == EnumValue: + value.name = value.name or key + value.parent = name return super().__new__(cls, name, bases, namespace)
polygraph-python/polygraph
a54bdeea322e4624cfa07379b4e3b06fa2c85e42
diff --git a/polygraph/types/tests/test_enum.py b/polygraph/types/tests/test_enum.py index dabba73..3525570 100644 --- a/polygraph/types/tests/test_enum.py +++ b/polygraph/types/tests/test_enum.py @@ -2,19 +2,19 @@ from unittest import TestCase from polygraph.exceptions import PolygraphValueError from polygraph.types.api import typedef -from polygraph.types.enum import EnumType +from polygraph.types.enum import EnumType, EnumValue class Colours(EnumType): - RED = "The colour of fury" - GREEN = "The colour of envy" - BLUE = "The colour of sloth" + RED = EnumValue("The colour of fury") + GREEN = EnumValue("The colour of envy") + BLUE = EnumValue("The colour of sloth") class Shapes(EnumType): - RECTANGLE = "A quadrangle" - SQUARE = "Also a quadrangle" - RHOMBUS = "Yet another quadrangle" + RECTANGLE = EnumValue("A quadrangle") + SQUARE = EnumValue("Also a quadrangle") + RHOMBUS = EnumValue("Yet another quadrangle") class EnumTest(TestCase): @@ -47,3 +47,11 @@ class EnumTest(TestCase): def test_enum_type(self): colour_type = typedef(Colours) self.assertEqual(len(colour_type.enum_values), 3) + + def test_enum_value_name(self): + class NamedValue(EnumType): + ORIGINAL = EnumValue("Name is ORIGINAL") + REPLACED = EnumValue("Name is NOT_REPLACED", name="NOT_REPLACED") + + self.assertEqual(NamedValue.ORIGINAL.name, "ORIGINAL") + self.assertEqual(NamedValue.REPLACED.name, "NOT_REPLACED")
EnumType should define values only using EnumValue
0.0
a54bdeea322e4624cfa07379b4e3b06fa2c85e42
[ "polygraph/types/tests/test_enum.py::EnumTest::test_enum_type", "polygraph/types/tests/test_enum.py::EnumTest::test_enum_value", "polygraph/types/tests/test_enum.py::EnumTest::test_enum_value_name", "polygraph/types/tests/test_enum.py::EnumTest::test_enum_values_dont_mix", "polygraph/types/tests/test_enum.py::EnumTest::test_simple_enum" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2017-05-08 14:25:40+00:00
mit
4,627
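The auto-naming the polygraph-63 test above asserts is a one-loop metaclass trick: every `EnumValue` found in the class body gets its attribute key as a name unless one was passed explicitly. `EnumMeta` and the two-field `EnumValue` below are simplified stand-ins:

```python
class EnumValue:
    def __init__(self, description=None, name=None):
        self.description = description
        self.name = name


class EnumMeta(type):
    def __new__(mcs, clsname, bases, namespace):
        for key, value in namespace.items():
            if isinstance(value, EnumValue):
                # Fall back to the attribute name, e.g. ORIGINAL.
                value.name = value.name or key
        return super().__new__(mcs, clsname, bases, namespace)


class NamedValue(metaclass=EnumMeta):
    ORIGINAL = EnumValue("Name is ORIGINAL")
    REPLACED = EnumValue("Name is NOT_REPLACED", name="NOT_REPLACED")


print(NamedValue.ORIGINAL.name, NamedValue.REPLACED.name)  # ORIGINAL NOT_REPLACED
```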
polygraph-python__polygraph-7
diff --git a/polygraph/types/basic_type.py b/polygraph/types/basic_type.py index 6c6d393..8c5f336 100644 --- a/polygraph/types/basic_type.py +++ b/polygraph/types/basic_type.py @@ -1,6 +1,6 @@ import types -from polygraph.exceptions import PolygraphValueError +from polygraph.exceptions import PolygraphSchemaError, PolygraphValueError from polygraph.utils.trim_docstring import trim_docstring @@ -10,6 +10,7 @@ class PolygraphTypeMeta(type): meta = namespace.pop("Type", types.SimpleNamespace()) meta.description = getattr(meta, "description", default_description) meta.name = getattr(meta, "name", name) or name + meta.possible_types = getattr(meta, "possible_types", None) namespace["_type"] = meta @@ -18,6 +19,9 @@ class PolygraphTypeMeta(type): def __str__(self): return str(self._type.name) + def __or__(self, other): + return Union(self, other) + class PolygraphType(metaclass=PolygraphTypeMeta): pass @@ -61,6 +65,34 @@ class Union(PolygraphOutputType, PolygraphType): GraphQL Object types, but provides for no guaranteed fields between those types. """ + def __new__(cls, *types): + types = set(types) + assert len(types) >= 2, "Unions must consist of more than 1 type" + bad_types = [t for t in types if not issubclass(t, PolygraphType)] + if bad_types: + message = "All types must be subclasses of PolygraphType. Invalid values: "\ + "{}".format(", ".join(bad_types)) + raise PolygraphSchemaError(message) + type_names = [t._type.name for t in types] + + class Unionable(PolygraphType): + def __new__(cls, value): + if type(value) not in cls._type.possible_types: + valid_types = ", ".join(str(t) for t in cls._type.possible_types) + message = "{} is an invalid value type. "\ + "Valid types: {}".format(type(value), valid_types) + raise PolygraphValueError(message) + return value + + class Type: + name = "|".join(type_names) + description = "One of {}".format(", ".join(type_names)) + possible_types = types + + name = "Union__" + "_".join(type_names) + bases = (Unionable, ) + attrs = {"Type": Type} + return type(name, bases, attrs) class Listable(PolygraphType, list):
polygraph-python/polygraph
b41d8ea4b5cb8fc90eae48a8398674fc4a4963a8
diff --git a/polygraph/types/tests/test_union.py b/polygraph/types/tests/test_union.py new file mode 100644 index 0000000..0c60c7d --- /dev/null +++ b/polygraph/types/tests/test_union.py @@ -0,0 +1,48 @@ +from unittest import TestCase, skip + +from polygraph.exceptions import PolygraphValueError +from polygraph.types.basic_type import Union +from polygraph.types.scalar import Float, Int, String + + +@skip # FIXME +class UnionTypeTest(TestCase): + def test_commutativity(self): + self.assertEqual(Union(String, Int), Union(Int, String)) + + def test_associativity(self): + self.assertEqual( + Union(Union(String, Int), Float), + Union(String, Int, Float), + ) + + def test_pipe_operator(self): + self.assertEqual( + String | Int, + Union(String, Int), + ) + + def test_pipe_operator_with_more_than_two_types(self): + self.assertEqual( + String | Int | Float, + Union(String, Int, Float), + ) + + +class UnionValueTest(TestCase): + def test_valid_type(self): + union = String | Int + self.assertEqual(union(Int(32)), Int(32)) + self.assertEqual(union(String("Test")), String("Test")) + + def test_value_must_be_typed(self): + union = String | Int + with self.assertRaises(PolygraphValueError): + union(32) + with self.assertRaises(PolygraphValueError): + union("Test") + + def test_value_must_have_right_type(self): + union = String | Int + with self.assertRaises(PolygraphValueError): + union(Float(32))
Union type http://graphql.org/learn/schema/#union-types
0.0
b41d8ea4b5cb8fc90eae48a8398674fc4a4963a8
[ "polygraph/types/tests/test_union.py::UnionValueTest::test_valid_type", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_be_typed", "polygraph/types/tests/test_union.py::UnionValueTest::test_value_must_have_right_type" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
2017-04-14 12:09:09+00:00
mit
4,628
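The `String | Int` syntax in the polygraph-7 record above comes from defining `__or__` on the metaclass, so the operator applies to the *classes* themselves — the same idea PEP 604 later adopted for builtin types in Python 3.10. A sketch with hypothetical names, returning a tuple where the real code returns `Union(cls, other)`:

```python
class UnionMeta(type):
    def __or__(cls, other):
        return ("union", cls, other)  # stand-in for Union(cls, other)


class Typed(metaclass=UnionMeta):
    pass


class Other(metaclass=UnionMeta):
    pass


print(Typed | Other)  # ('union', <class '__main__.Typed'>, <class '__main__.Other'>)
```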
popylar__popylar-25
diff --git a/.travis.yml b/.travis.yml index 737b58e..c18718d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,15 +11,19 @@ deploy: env: global: - PIP_DEPS="coveralls pytest-cov flake8" + python: - '2.7' -- '3.5' +- '3.6' + install: -- pip install $PIP_DEPS -- pip install -r requirements.txt -- python setup.py install +- travis_retry pip install $PIP_DEPS +- travis_retry pip install -r requirements.txt +- travis_retry pip install -e . + script: - flake8 --ignore N802,N806 `find . -name \*.py | grep -v setup.py | grep -v version.py | grep -v __init__.py | grep -v /docs/` - py.test --pyargs popylar --cov-report term-missing --cov=popylar + after_success: - coveralls diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..0bd6e3a --- /dev/null +++ b/Makefile @@ -0,0 +1,2 @@ +test: + py.test --pyargs popylar --cov-report term-missing --cov=popylar diff --git a/popylar/popylar.py b/popylar/popylar.py index 39c0c9a..aa5bf8f 100644 --- a/popylar/popylar.py +++ b/popylar/popylar.py @@ -26,7 +26,7 @@ def opt_out(): To opt-in again, run ``popylar.reset_uid()`` """ parser = get_or_create_config() - parser['user']['track'] = False + parser['user']['track'] = "False" with open(popylar_path, 'w') as fhandle: parser.write(fhandle)
popylar/popylar
e182d06596b8a2b0efd24adafb4a1aa494f23585
diff --git a/popylar/tests/test_popylar.py b/popylar/tests/test_popylar.py index e690377..229f0db 100644 --- a/popylar/tests/test_popylar.py +++ b/popylar/tests/test_popylar.py @@ -18,3 +18,12 @@ def test_track_event_with_version(): 'test_track_event_with_version', software_version=VERSION) assert r.status_code == 200, "response is not OK" + + +def test_opt(): + popylar.opt_out() + parser = popylar.get_or_create_config() + assert parser['user']['track'] == "False" + popylar.opt_in() + parser = popylar.get_or_create_config() + assert parser['user']['track'] == "True"
opt_out fails with TypeError Nice simple package, just what I need, but: Calling popylar.opt_out() leads to an error: ``` >>> import popylar >>> r = popylar.track_event('UA-86484662-2', 'test', 'test_track_event') >>> print r.status_code 200 >>> popylar.opt_out() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/pazcal/Library/Python/2.7/lib/python/site-packages/popylar/popylar.py", line 29, in opt_out parser['user']['track'] = False File "/Users/pazcal/Library/Python/2.7/lib/python/site-packages/backports/configparser/__init__.py", line 1283, in __setitem__ _, key, value = self._parser._validate_value_types(option=key, value=value) File "/Users/pazcal/Library/Python/2.7/lib/python/site-packages/backports/configparser/__init__.py", line 1221, in _validate_value_types raise TypeError("option values must be strings") TypeError: option values must be strings >>> ``` I can fix this by changing line 29 in popylar.py from: ``` parser['user']['track'] = False ``` to: ``` parser['user']['track'] = "False" ``` I don't mind cloning and doing this via a pull request but seems so minor a fix it's not really worth it. Charlie
0.0
e182d06596b8a2b0efd24adafb4a1aa494f23585
[ "popylar/tests/test_popylar.py::test_opt" ]
[ "popylar/tests/test_popylar.py::test_track_event", "popylar/tests/test_popylar.py::test_track_event_with_version" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
2017-07-22 13:21:42+00:00
bsd-2-clause
4,629
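The popylar fix above boils down to a standard-library rule: `configparser` option values must be strings. A runnable sketch of both the failure and the fix, reading the flag back with `getboolean()`:

```python
import configparser

parser = configparser.ConfigParser()
parser["user"] = {}

try:
    parser["user"]["track"] = False        # the original bug
except TypeError as exc:
    print(exc)                             # option values must be strings

parser["user"]["track"] = "False"          # the fix from the patch
print(parser["user"].getboolean("track"))  # False
```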
postlund__pyatv-1033
diff --git a/docs/api/pyatv/interface.html b/docs/api/pyatv/interface.html index f0247954..6cf36c17 100644 --- a/docs/api/pyatv/interface.html +++ b/docs/api/pyatv/interface.html @@ -857,7 +857,9 @@ class Metadata(ABC): @abstractmethod @feature(30, &#34;Artwork&#34;, &#34;Playing media artwork.&#34;) - async def artwork(self, width=512, height=None) -&gt; Optional[ArtworkInfo]: + async def artwork( + self, width: Optional[int] = 512, height: Optional[int] = None + ) -&gt; Optional[ArtworkInfo]: &#34;&#34;&#34;Return artwork for what is currently playing (or None). The parameters &#34;width&#34; and &#34;height&#34; makes it possible to request artwork of a @@ -2145,7 +2147,9 @@ in one of the listed states.</p></section> @abstractmethod @feature(30, &#34;Artwork&#34;, &#34;Playing media artwork.&#34;) - async def artwork(self, width=512, height=None) -&gt; Optional[ArtworkInfo]: + async def artwork( + self, width: Optional[int] = 512, height: Optional[int] = None + ) -&gt; Optional[ArtworkInfo]: &#34;&#34;&#34;Return artwork for what is currently playing (or None). The parameters &#34;width&#34; and &#34;height&#34; makes it possible to request artwork of a @@ -2244,7 +2248,7 @@ def device_id(self) -&gt; Optional[str]: <h3>Methods</h3> <dl> <dt id="pyatv.interface.Metadata.artwork"><code class="name flex"> -<span>async def <span class="ident">artwork</span></span>(<span>self, width=512, height=None) -> Optional[<a title="pyatv.interface.ArtworkInfo" href="#pyatv.interface.ArtworkInfo">ArtworkInfo</a>]</span> +<span>async def <span class="ident">artwork</span></span>(<span>self, width: Optional[int] = 512, height: Optional[int] = None) -> Optional[<a title="pyatv.interface.ArtworkInfo" href="#pyatv.interface.ArtworkInfo">ArtworkInfo</a>]</span> </code></dt> <dd> <section class="desc"><p>Return artwork for what is currently playing (or None).</p> @@ -2259,7 +2263,9 @@ aspect ratio.</p></section> </summary> <pre><code class="python">@abstractmethod @feature(30, &#34;Artwork&#34;, &#34;Playing media artwork.&#34;) -async def artwork(self, width=512, height=None) -&gt; Optional[ArtworkInfo]: +async def artwork( + self, width: Optional[int] = 512, height: Optional[int] = None +) -&gt; Optional[ArtworkInfo]: &#34;&#34;&#34;Return artwork for what is currently playing (or None). The parameters &#34;width&#34; and &#34;height&#34; makes it possible to request artwork of a diff --git a/pyatv/dmap/__init__.py b/pyatv/dmap/__init__.py index ccf0e827..3d5fa0d4 100644 --- a/pyatv/dmap/__init__.py +++ b/pyatv/dmap/__init__.py @@ -205,7 +205,9 @@ class BaseDmapAppleTV: self.latest_hash = self.latest_playing.hash return self.latest_playing - async def artwork(self, width, height) -> Optional[ArtworkInfo]: + async def artwork( + self, width: Optional[int], height: Optional[int] + ) -> Optional[ArtworkInfo]: """Return artwork for what is currently playing (or None).""" url = _ARTWORK_CMD.format(width=width or 0, height=height or 0) art = await self.daap.get(url, daap_data=False) @@ -405,7 +407,9 @@ class DmapMetadata(Metadata): self.apple_tv = apple_tv self.artwork_cache = Cache(limit=4) - async def artwork(self, width=512, height=None) -> Optional[ArtworkInfo]: + async def artwork( + self, width: Optional[int] = 512, height: Optional[int] = None + ) -> Optional[ArtworkInfo]: """Return artwork for what is currently playing (or None). 
The parameters "width" and "height" makes it possible to request artwork of a diff --git a/pyatv/interface.py b/pyatv/interface.py index 005d43c9..44aa7dcf 100644 --- a/pyatv/interface.py +++ b/pyatv/interface.py @@ -629,7 +629,9 @@ class Metadata(ABC): @abstractmethod @feature(30, "Artwork", "Playing media artwork.") - async def artwork(self, width=512, height=None) -> Optional[ArtworkInfo]: + async def artwork( + self, width: Optional[int] = 512, height: Optional[int] = None + ) -> Optional[ArtworkInfo]: """Return artwork for what is currently playing (or None). The parameters "width" and "height" makes it possible to request artwork of a diff --git a/pyatv/mrp/__init__.py b/pyatv/mrp/__init__.py index 30d17ebc..4dd3774f 100644 --- a/pyatv/mrp/__init__.py +++ b/pyatv/mrp/__init__.py @@ -420,7 +420,9 @@ class MrpMetadata(Metadata): self.psm = psm self.artwork_cache = Cache(limit=4) - async def artwork(self, width=512, height=None) -> Optional[ArtworkInfo]: + async def artwork( + self, width: Optional[int] = 512, height: Optional[int] = None + ) -> Optional[ArtworkInfo]: """Return artwork for what is currently playing (or None). The parameters "width" and "height" makes it possible to request artwork of a @@ -438,14 +440,17 @@ class MrpMetadata(Metadata): _LOGGER.debug("Retrieved artwork %s from cache", identifier) return self.artwork_cache.get(identifier) - artwork = await self._fetch_artwork(width or 0, height or -1) - if artwork: + artwork: Optional[ArtworkInfo] = None + try: + artwork = await self._fetch_artwork(width or 0, height or -1) + except Exception: + _LOGGER.warning("Artwork not present in response") + else: self.artwork_cache.put(identifier, artwork) - return artwork - return None + return artwork - async def _fetch_artwork(self, width, height): + async def _fetch_artwork(self, width, height) -> Optional[ArtworkInfo]: playing = self.psm.playing resp = await self.psm.protocol.send_and_receive( messages.playback_queue_request(playing.location, width, height)
postlund/pyatv
5df2295938d56eb679acfec024909d03f78fc69f
diff --git a/tests/fake_device/mrp.py b/tests/fake_device/mrp.py index cf452939..7d2ea31f 100644 --- a/tests/fake_device/mrp.py +++ b/tests/fake_device/mrp.py @@ -480,12 +480,15 @@ class FakeMrpService(MrpServerAuth, asyncio.Protocol): setstate = messages.create( protobuf.SET_STATE_MESSAGE, identifier=message.identifier ) - queue = setstate.inner().playbackQueue - queue.location = 0 - item = queue.contentItems.add() - item.artworkData = self.state.states[self.state.active_player].artwork - item.artworkDataWidth = state.artwork_width or 456 - item.artworkDataHeight = state.artwork_height or 789 + + artwork_data = self.state.states[self.state.active_player].artwork + if artwork_data: + queue = setstate.inner().playbackQueue + queue.location = 0 + item = queue.contentItems.add() + item.artworkData = artwork_data + item.artworkDataWidth = state.artwork_width or 456 + item.artworkDataHeight = state.artwork_height or 789 self.send_to_client(setstate) def handle_wake_device(self, message, inner): diff --git a/tests/mrp/test_mrp_functional.py b/tests/mrp/test_mrp_functional.py index f26f1390..31baa327 100644 --- a/tests/mrp/test_mrp_functional.py +++ b/tests/mrp/test_mrp_functional.py @@ -168,6 +168,19 @@ class MRPFunctionalTest(common_functional_tests.CommonFunctionalTests): await self.playing(title="dummy") self.assertEqual(self.atv.metadata.artwork_id, "some_id") + @unittest_run_loop + async def test_metadata_artwork_erroneously_available(self): + self.usecase.example_video() + + # Metadata suggests that artwork is available but no artwork is available + # when requested by client + self.usecase.change_artwork(None, ARTWORK_MIMETYPE, ARTWORK_ID) + + await self.playing(title="dummy") + + artwork = await self.atv.metadata.artwork(width=123, height=456) + self.assertIsNone(artwork) + @unittest_run_loop async def test_metadata_artwork_width_and_height(self): self.usecase.example_video()
Fetching artwork sometimes fails with bad error handling

**Describe the bug**

As reported here: https://github.com/home-assistant/core/issues/44305#issuecomment-756771464

Stack trace:

```python
Error handling request
Traceback (most recent call last):
  File "/usr/local/lib/python3.8/site-packages/aiohttp/web_protocol.py", line 422, in _handle_request
    resp = await self._request_handler(request)
  File "/usr/local/lib/python3.8/site-packages/aiohttp/web_app.py", line 499, in _handle
    resp = await handler(request)
  File "/usr/local/lib/python3.8/site-packages/aiohttp/web_middlewares.py", line 118, in impl
    return await handler(request)
  File "/usr/src/homeassistant/homeassistant/components/http/request_context.py", line 18, in request_context_middleware
    return await handler(request)
  File "/usr/src/homeassistant/homeassistant/components/http/ban.py", line 72, in ban_middleware
    return await handler(request)
  File "/usr/src/homeassistant/homeassistant/components/http/auth.py", line 127, in auth_middleware
    return await handler(request)
  File "/usr/src/homeassistant/homeassistant/components/http/view.py", line 129, in handle
    result = await result
  File "/usr/src/homeassistant/homeassistant/components/media_player/__init__.py", line 971, in get
    data, content_type = await player.async_get_media_image()
  File "/config/custom_components/apple_tv/media_player.py", line 205, in async_get_media_image
    artwork = await self.atv.metadata.artwork()
  File "/usr/local/lib/python3.8/site-packages/pyatv/mrp/__init__.py", line 441, in artwork
    artwork = await self._fetch_artwork(width or 0, height or -1)
  File "/usr/local/lib/python3.8/site-packages/pyatv/mrp/__init__.py", line 456, in _fetch_artwork
    item = resp.inner().playbackQueue.contentItems[playing.location]
  File "/usr/local/lib/python3.8/site-packages/google/protobuf/internal/containers.py", line 209, in __getitem__
    return self._values[key]
IndexError: list index out of range
```

I should add better error handling and just fail silently.

**To Reproduce**

I expect that this can happen when trying to fetch artwork when it is not available, even though pyatv thinks it is. One such case is when the metadata is wrong (a bug in the app) or when metadata is stuck in pyatv so that it doesn't match what's playing (a bug in pyatv). I might be able to reproduce this in a functional test.

**Expected behavior**

Maybe put a debug log in there and fail silently.

**System Setup (please complete the following information):**

- OS: any
- Python: any
- pyatv: 0.7.5
- Apple TV: any with tvOS

**Additional context**
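To make the intended fix concrete, here is a minimal sketch of the fail-silently pattern proposed above (the patch for this instance implements the same idea); `fetch_artwork` and `cache` are hypothetical stand-ins, not pyatv's actual API:

```python
import logging

_LOGGER = logging.getLogger(__name__)


async def artwork_or_none(fetch_artwork, cache, identifier, width, height):
    """Return artwork if the receiver actually delivers it, otherwise None."""
    artwork = None
    try:
        # Can raise e.g. IndexError when metadata claims artwork is available
        # but the response contains no content item (the bug traced above)
        artwork = await fetch_artwork(width or 0, height or -1)
    except Exception:
        _LOGGER.warning("Artwork not present in response")
    else:
        cache.put(identifier, artwork)
    return artwork
```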
0.0
5df2295938d56eb679acfec024909d03f78fc69f
[ "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_erroneously_available" ]
[ "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_always_available_features", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_basic_device_info", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_down", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_down_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_home", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_left", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_left_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_menu", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_menu_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_next", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_pause", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_play", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_play_pause", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_previous", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_right", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_right_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_select", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_select_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_stop", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_suspend", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_top_menu", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_up", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_up_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_volume_down", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_volume_up", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_wakeup", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_close_connection", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_connect_invalid_protocol", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_connect_missing_device_id", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_artwork", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_play_url", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_when_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_with_supported_commands", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_invalid_airplay_credentials_format", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_invalid_credentials_format", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_item_id_hash", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_item_updates", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_cache", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_id", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_id_no_identifier", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_none_if_not_available", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_width_and_height", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_device_id", 
"tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_loading", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_music_paused", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_music_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_none_type_when_not_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_playback_rate_device_state", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_seeking", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_video_paused", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_video_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_pair_missing_service", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_local_file", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_pause_emulation", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url_authenticated", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url_not_authenticated_error", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_app", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_immutable", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_immutable_update_content_item", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_power_state", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_power_state_acknowledgement", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_push_updater_active", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_push_updates", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_repeat_state", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_seek_in_playing_media", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_default_commands", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_repeat", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_shuffle_albums", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_shuffle_common", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_shuffle_state_albums", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_shuffle_state_common", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_skip_forward_backward", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_update_client_before_setstate", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_volume_controls" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-04-23 11:32:14+00:00
mit
4,630
postlund__pyatv-1098
diff --git a/docs/documentation/protocols.md b/docs/documentation/protocols.md index 42da2c6d..08430c35 100644 --- a/docs/documentation/protocols.md +++ b/docs/documentation/protocols.md @@ -1183,6 +1183,47 @@ CSeq: 3 This section deals with "video part" of AirPlay. TBD +### Commands + +#### /auth-setup + +Devices supporting MFi authentication (e.g. has `et=4`) might require an authentication step +initiated by `/auth-setup`. This is always the case for AirPlay 2. More details +[here](https://openairplay.github.io/airplay-spec/audio/rtsp_requests/post_auth_setup.html). + +*TODO: document more* + +The request consists of one byte encryption type (0x01: unencrypted, +0x02: MFi-SAP-encrypted AES key) and 32 bytes Curve25519 public key. Normally this step is used +to verify MFi authenticity, but no further action needs to be taken (i.e. just send request +and ignore response) for devices requiring this step. Implementation in `pyatv` has been stolen +from owntone [here](https://github.com/owntone/owntone-server/blob/c1db4d914f5cd8e7dbe6c1b6478d68a4c14824af/src/outputs/raop.c#L1568). + +**Sender -> Receiver:** +```raw +POST /auth-setup RTSP/1.0 +CSeq: 0 +User-Agent: AirPlay/540.31 +DACP-ID: BFAA2A9155BD093C +Active-Remote: 347218209 +Client-Instance: BFAA2A9155BD093C +Content-Type: application/octet-stream +Content-Length: 33 + +015902ede90d4ef2bd4cb68a6330038207a94dbd50d8aa465b5d8c012a0c7e1d4e27 +``` + +**Receiver -> Sender:** +```raw +RTSP/1.0 200 OK +Content-Length: 1076 +Content-Type: application/octet-stream +Server: AirTunes/366.0 +CSeq: 0 + +97a02c0d0a31486316de944d8404f4e01f93b05dde4543cc022a5727e8a352330000038c3082038806092a864886f70d010702a0820379308203750201013100300b06092a864886f70d010701a082035d3082035930820241a003020102020f1212aa121127aa00aa8023aa238776300d06092a864886f70d0101050500308183310b300906035504061302555331133011060355040a130a4170706c6520496e632e31263024060355040b131d4170706c652043657274696669636174696f6e20417574686f72697479313730350603550403132e4170706c652069506f64204163636573736f726965732043657274696669636174696f6e20417574686f72697479301e170d3132313132373138323135305a170d3230313132373138323135305a3070310b300906035504061302555331133011060355040a0c0a4170706c6520496e632e311f301d060355040b0c164170706c652069506f64204163636573736f72696573312b302906035504030c224950415f31323132414131323131323741413030414138303233414132333837373630819f300d06092a864886f70d010101050003818d003081890281810097e302c45e7b6f387dd390201b0dd902b19dc30d72a93a8b9f1313c6108e90ee93daff24177526736e4f1f58a2c2382cf4b7f7359bb1b1a3a7595850d489f335557a48653d96e9407ccc05eba6c867716e446b31d2bdc9c5122af4c213e7d7f0635b74e323094483a900bd3f93ce8833785b2fd14d88fb2dd4c581e1189b38250203010001a360305e301d0603551d0e04160414d74ea8b90475ee5140d2be7d2f9258931c7543cb300c0603551d130101ff04023000301f0603551d23041830168014ff4b1a439af51996ab18002b61c9ee409d8ec704300e0603551d0f0101ff0404030203b8300d06092a864886f70d0101050500038201010012e8b29b1e1b81e7a14b6435b92a9c58f0a28e6bcb645edd223969b77a70dda3ddc280562f53cb87e3ccd5fea213ccc9c2a4f005c3aa4447b84a895df649f74e9f6612d6cc69eeb7561706fa718f5e1d80b0554affe911c6fa3f99ca06bcf4debf03b64449bde16058392c830be55ae33273d24eecaf0f4aef6f6c46bed87192e2773e5ae092098b32563a532164df5eecd3fc299c8b267cf555b516b02a013920242f4162e6cb5d8d555356d3999c989860ed8c4ea2a0f34af4bcc74b864a07c6d952115dd28b0cc5d8bc780567dcaafc721e678391a048b00cf8664d5c0ad1949b57165a7c98144480ac0510a1887e27821d966b14478c901f6c7548f8563e310000000080121b14309c641bc593196f886c633d19986c11ca9cb4be2fdad1f2ec1427eeb8da23aaeaf7a713f2b8e05a694
2db364e3dd408d5a1eeb1525baadc5ccb46614dadef1bfa565c65f46a54f576802209faa39ac442ac7cd43995be833f7794d0517fd93218e86c0228b30b036d3055476114d926de2875bed7cef4970492df58a3 +``` + ## References [RAOP-Player](https://github.com/philippe44/RAOP-Player) diff --git a/docs/documentation/supported_features.md b/docs/documentation/supported_features.md index e5e64285..a9203a47 100644 --- a/docs/documentation/supported_features.md +++ b/docs/documentation/supported_features.md @@ -192,8 +192,24 @@ AirTunes). * Metadata and push updates only reflect what pyatv is currently playing as there seems to not be possible to get current play state from an AirPlay receiver -* Devices requiring password, pairing or any kind of encryption are not supported, - e.g. Apple TV 4+ or AirPort Express {% include issue no="1077,1078" %} +* Devices requiring password are not supported +* Devices requiring HAP pairing, e.g. Apple TV 4+, are not supported {% include issue no="1078" %} * Track length is hardcoded to 3s for now * Remote control commands does not work, e.g. play or pause {% include issue no="1068" %} * Retransmission of lost packets is not supported {% include issue no="1079" %} + +### Verified Devices + +Audio streaming has been verified to work with these devices: + +* Apple TV 3 (v8.4.4) +* HomePod Mini (v14.5) +* AirPort Express (v7.8.1) +* Yamaha RX-V773 (v1.98) + +The following 3rd party software receivers have also been verified to work: + +* [shairport-sync](https://github.com/mikebrady/shairport-sync) (v3.3.8) + +If you have verified another device or receiver, please update the list by pressing +*Edit this page* below and opening a pull request. diff --git a/pyatv/raop/parsers.py b/pyatv/raop/parsers.py index ea47bb70..96c8f00e 100644 --- a/pyatv/raop/parsers.py +++ b/pyatv/raop/parsers.py @@ -23,6 +23,15 @@ class EncryptionType(IntFlag): FairPlaySAPv25 = 16 +class MetadataType(IntFlag): + """Metadata types supported by receiver.""" + + NotSupported = 0 + Text = 1 + Artwork = 2 + Progress = 4 + + # pylint: enable=invalid-name @@ -43,9 +52,9 @@ def get_encryption_types(properties: Mapping[str, str]) -> EncryptionType: Input format from zeroconf is a comma separated list: - 0,1,3 + et=0,1,3 - Each number represents one encryption type. + 0=unencrypted, 1=RSA, 3=FairPlay, 4=MFiSAP, 5=FairPlay SAPv2.5 """ output = EncryptionType.Unknown try: @@ -63,3 +72,27 @@ def get_encryption_types(properties: Mapping[str, str]) -> EncryptionType: 5: EncryptionType.FairPlaySAPv25, }.get(enc_type, EncryptionType.Unknown) return output + + +def get_metadata_types(properties: Mapping[str, str]) -> MetadataType: + """Return metadata types supported by receiver. 
+ + Input format from zeroconf is comma separated list: + + md=0,1,2 + + 0=text, 1=artwork, 2=progress + """ + output = MetadataType.NotSupported + try: + md_types = [int(x) for x in properties["md"].split(",")] + except (KeyError, ValueError): + return output + else: + for md_type in md_types: + output |= { + 0: MetadataType.Text, + 1: MetadataType.Artwork, + 2: MetadataType.Progress, + }.get(md_type, MetadataType.NotSupported) + return output diff --git a/pyatv/raop/raop.py b/pyatv/raop/raop.py index fcc60fdd..5b00d0e2 100644 --- a/pyatv/raop/raop.py +++ b/pyatv/raop/raop.py @@ -15,8 +15,10 @@ from pyatv.raop.metadata import EMPTY_METADATA, AudioMetadata from pyatv.raop.packets import AudioPacketHeader, SyncPacket, TimingPacket from pyatv.raop.parsers import ( EncryptionType, + MetadataType, get_audio_properties, get_encryption_types, + get_metadata_types, ) from pyatv.raop.rtsp import FRAMES_PER_PACKET, RtspContext, RtspSession from pyatv.support import log_binary @@ -33,6 +35,8 @@ MISSING_METADATA = AudioMetadata( title="Streaming with pyatv", artist="pyatv", album="RAOP" ) +SUPPORTED_ENCRYPTIONS = EncryptionType.Unencrypted | EncryptionType.MFiSAP + class ControlClient(asyncio.Protocol): """Control client responsible for e.g. sync packets.""" @@ -240,6 +244,8 @@ class RaopClient: self.context: RtspContext = context self.control_client: Optional[ControlClient] = None self.timing_client: Optional[TimingClient] = None + self._encryption_types: EncryptionType = EncryptionType.Unknown + self._metadata_types: MetadataType = MetadataType.NotSupported self._metadata: AudioMetadata = EMPTY_METADATA self._keep_alive_task: Optional[asyncio.Future] = None self._listener: Optional[weakref.ReferenceType[Any]] = None @@ -291,11 +297,20 @@ class RaopClient: async def initialize(self, properties: Mapping[str, str]): """Initialize the session.""" + self._encryption_types = get_encryption_types(properties) + self._metadata_types = get_metadata_types(properties) + + _LOGGER.debug( + "Initializing RTSP with encryption=%s, metadata=%s", + self._encryption_types, + self._metadata_types, + ) + # Misplaced check that unencrypted data is supported - encryption_types = get_encryption_types(properties) - if EncryptionType.Unencrypted not in encryption_types: + intersection = self._encryption_types & SUPPORTED_ENCRYPTIONS + if not intersection or intersection == EncryptionType.Unknown: raise exceptions.NotSupportedError( - f"no supported encryption types in {str(encryption_types)}" + f"no supported encryption types in {str(self._encryption_types)}" ) self._update_output_properties(properties) @@ -331,6 +346,10 @@ class RaopClient: ) async def _setup_session(self): + # Do auth-setup if MFiSAP encryption is supported by receiver + if EncryptionType.MFiSAP in self._encryption_types: + await self.rtsp.auth_setup() + await self.rtsp.announce() resp = await self.rtsp.setup(self.control_client.port, self.timing_client.port) @@ -377,14 +396,18 @@ class RaopClient: # Start sending sync packets self.control_client.start(self.rtsp.remote_ip) + # Apply text metadata if it is supported self._metadata = metadata - _LOGGER.debug("Playing with metadata: %s", self.metadata) - await self.rtsp.set_metadata( - self.context.rtpseq, self.context.rtptime, self.metadata - ) + if MetadataType.Text in self._metadata_types: + _LOGGER.debug("Playing with metadata: %s", self.metadata) + await self.rtsp.set_metadata( + self.context.rtpseq, self.context.rtptime, self.metadata + ) # Start keep-alive task to ensure connection is not closed by 
remote device - self._keep_alive_task = asyncio.ensure_future(self._send_keep_alive()) + # but only if "text" metadata is supported + if MetadataType.Text in self._metadata_types: + self._keep_alive_task = asyncio.ensure_future(self._send_keep_alive()) listener = self.listener if listener: diff --git a/pyatv/raop/rtsp.py b/pyatv/raop/rtsp.py index 1f7d5a69..86c3c406 100644 --- a/pyatv/raop/rtsp.py +++ b/pyatv/raop/rtsp.py @@ -4,7 +4,7 @@ import logging from random import randrange import re from socket import socket -from typing import Dict, Mapping, NamedTuple, Optional, Tuple, Union +from typing import Dict, Mapping, NamedTuple, Optional, Tuple, Union, cast from pyatv import exceptions from pyatv.dmap import tags @@ -28,6 +28,20 @@ ANNOUNCE_PAYLOAD = ( + "{bits_per_channel} 40 10 14 {channels} 255 0 0 {sample_rate}\r\n" ) +# Used to signal that traffic is to be unencrypted +AUTH_SETUP_UNENCRYPTED = b"\x01" + +# Just a static Curve25519 public key used to satisfy the auth-setup step for devices +# requiring that (e.g. AirPort Express). We never verify anything. Source: +# https://github.com/owntone/owntone-server/blob/ +# c1db4d914f5cd8e7dbe6c1b6478d68a4c14824af/src/outputs/raop.c#L276 +CURVE25519_PUB_KEY = ( + b"\x59\x02\xed\xe9\x0d\x4e\xf2\xbd" + b"\x4c\xb6\x8a\x63\x30\x03\x82\x07" + b"\xa9\x4d\xbd\x50\xd8\xaa\x46\x5b" + b"\x5d\x8c\x01\x2a\x0c\x7e\x1d\x4e" +) + class RtspContext: """Data used for one RTSP session. @@ -79,16 +93,16 @@ class RtspResponse(NamedTuple): code: int message: str headers: Mapping[str, str] - body: str + body: Union[str, bytes] -def parse_response(response: str) -> Tuple[RtspResponse, str]: +def parse_response(response: bytes) -> Tuple[Optional[RtspResponse], bytes]: """Parse RTSP response.""" try: - header_str, body = response.split("\r\n\r\n", maxsplit=1) + header_str, body = response.split(b"\r\n\r\n", maxsplit=1) except ValueError as ex: raise ValueError("missing end lines") from ex - headers = header_str.split("\r\n") + headers = header_str.decode("utf-8").split("\r\n") match = re.match(r"RTSP/1.0 (\d+) (.+)", headers[0]) if not match: @@ -103,11 +117,18 @@ def parse_response(response: str) -> Tuple[RtspResponse, str]: resp_headers[key] = value content_length = int(resp_headers.get("Content-Length", 0)) - if body and len(body) < content_length: - raise ValueError("too short body") + if len(body or []) < content_length: + return None, response + + response_body: Union[str, bytes] = body[0:content_length] + + # Assume body is text unless content type is application/octet-stream + if resp_headers.get("Content-Type") != "application/octet-stream": + # We know it's bytes here + response_body = cast(bytes, response_body).decode("utf-8") return ( - RtspResponse(int(code), message, resp_headers, body[0:content_length]), + RtspResponse(int(code), message, resp_headers, response_body), body[content_length:], ) @@ -121,6 +142,7 @@ class RtspSession(asyncio.Protocol): self.transport = None self.requests: Dict[int, Tuple[asyncio.Semaphore, RtspResponse]] = {} self.cseq = 0 + self.buffer = b"" @property def local_ip(self) -> str: @@ -157,9 +179,13 @@ class RtspSession(asyncio.Protocol): def data_received(self, data: bytes) -> None: """Handle incoming RTSP data.""" _LOGGER.debug("Received: %s", data) - rest = data.decode("utf-8") - while rest != "": - parsed, rest = parse_response(rest) + self.buffer += data + while self.buffer: + parsed, self.buffer = parse_response(self.buffer) + if parsed is None: + _LOGGER.debug("Not enough data to decode message") + break + if "CSeq" 
in parsed.headers: cseq = int(parsed.headers["CSeq"]) if cseq in self.requests: @@ -176,6 +202,18 @@ class RtspSession(asyncio.Protocol): """Handle that connection was lost.""" _LOGGER.debug("RTSP Connection closed") + async def auth_setup(self) -> RtspResponse: + """Send auth-setup message.""" + # Payload to say that we want to proceed unencrypted + body = AUTH_SETUP_UNENCRYPTED + CURVE25519_PUB_KEY + + return await self.send_and_receive( + "POST", + uri="/auth-setup", + content_type="application/octet-stream", + body=body, + ) + async def announce(self) -> RtspResponse: """Send ANNOUNCE message.""" body = ANNOUNCE_PAYLOAD.format( @@ -249,6 +287,7 @@ class RtspSession(asyncio.Protocol): async def send_and_receive( self, method: str, + uri: Optional[str] = None, content_type: Optional[str] = None, headers: Mapping[str, object] = None, body: Union[str, bytes] = None, @@ -260,7 +299,7 @@ class RtspSession(asyncio.Protocol): if isinstance(body, str): body = body.encode("utf-8") - msg = f"{method} {self.uri} RTSP/1.0" + msg = f"{method} {uri or self.uri} RTSP/1.0" msg += f"\r\nCSeq: {cseq}" msg += f"\r\nUser-Agent: {USER_AGENT}" msg += f"\r\nDACP-ID: {self.context.dacp_id}"
postlund/pyatv
311ca61ea8efd1ca69fe4e76983c30039af8cc45
diff --git a/tests/raop/test_parsers.py b/tests/raop/test_parsers.py index c58bd541..ef71eb04 100644 --- a/tests/raop/test_parsers.py +++ b/tests/raop/test_parsers.py @@ -5,8 +5,10 @@ import pytest from pyatv.exceptions import ProtocolError from pyatv.raop.parsers import ( EncryptionType, + MetadataType, get_audio_properties, get_encryption_types, + get_metadata_types, ) @@ -19,7 +21,7 @@ from pyatv.raop.parsers import ( ({"ss": "32"}, 44100, 2, 4), ], ) -def test_parse_properties(properties, expected_sr, expected_ch, expected_ss): +def test_parse_audio_properties(properties, expected_sr, expected_ch, expected_ss): sample_rate, channels, sample_size = get_audio_properties(properties) assert sample_rate == expected_sr assert channels == expected_ch @@ -27,7 +29,7 @@ def test_parse_properties(properties, expected_sr, expected_ch, expected_ss): @pytest.mark.parametrize("properties", [{"sr": "abc"}, {"ch": "cde"}, {"ss": "fgh"}]) -def test_parse_invalid_property_raises(properties): +def test_parse_invalid_audio_property_raises(properties): with pytest.raises(ProtocolError): get_audio_properties(properties) @@ -64,3 +66,19 @@ def test_parse_encryption_include_unknown_type(): get_encryption_types({"et": "0,1000"}) == EncryptionType.Unknown | EncryptionType.Unencrypted ) + + [email protected]( + "properties,expected", + [ + ({}, MetadataType.NotSupported), + ({"md": "0"}, MetadataType.Text), + ({"md": "1"}, MetadataType.Artwork), + ( + {"md": "0,1,2"}, + MetadataType.Text | MetadataType.Artwork | MetadataType.Progress, + ), + ], +) +def test_parse_metadata_types(properties, expected): + assert get_metadata_types(properties) == expected diff --git a/tests/raop/test_rtsp.py b/tests/raop/test_rtsp.py index cdc3a8d7..dc373c18 100644 --- a/tests/raop/test_rtsp.py +++ b/tests/raop/test_rtsp.py @@ -6,51 +6,62 @@ from pyatv.raop.rtsp import parse_response def test_parse_ok_first_line(): - resp, rest = parse_response("RTSP/1.0 200 OK\r\n\r\n") + resp, rest = parse_response(b"RTSP/1.0 200 OK\r\n\r\n") assert resp.code == 200 assert resp.message == "OK" - assert rest == "" + assert rest == b"" def test_parse_missing_ending(): with pytest.raises(ValueError) as exc: - parse_response("RTSP/1.0 200 OK\r\n") + parse_response(b"RTSP/1.0 200 OK\r\n") assert "missing end lines" in str(exc) def test_parse_headers(): - resp, rest = parse_response("RTSP/1.0 200 OK\r\nA: B\r\nC: D\r\n\r\n") + resp, rest = parse_response(b"RTSP/1.0 200 OK\r\nA: B\r\nC: D\r\n\r\n") assert len(resp.headers) == 2 assert resp.headers["A"] == "B" assert resp.headers["C"] == "D" - assert rest == "" + assert rest == b"" def test_parse_body(): - resp, rest = parse_response("RTSP/1.0 200 OK\r\nContent-Length: 4\r\n\r\nbody") + resp, rest = parse_response(b"RTSP/1.0 200 OK\r\nContent-Length: 4\r\n\r\nbody") assert resp.body == "body" - assert rest == "" + assert rest == b"" + + +def test_parse_too_no_body(): + content = b"RTSP/1.0 200 OK\r\nContent-Length: 5\r\n\r\n" + resp, rest = parse_response(content) + + assert resp is None + assert rest == content def test_parse_too_short_body(): - with pytest.raises(ValueError) as exc: - parse_response("RTSP/1.0 200 OK\r\nContent-Length: 5\r\n\r\nbody") + content = b"RTSP/1.0 200 OK\r\nContent-Length: 5\r\n\r\nbody" + resp, rest = parse_response(content) - assert "too short body" in str(exc) + assert resp is None + assert rest == content def test_parse_body_excessive_data(): - resp, rest = parse_response("RTSP/1.0 200 OK\r\nContent-Length: 4\r\n\r\nbodyextra") + resp, rest = parse_response( + b"RTSP/1.0 
200 OK\r\nContent-Length: 4\r\n\r\nbodyextra" + ) assert resp.body == "body" - assert rest == "extra" + assert rest == b"extra" def test_parse_sequent_messages(): resp, rest = parse_response( - "RTSP/1.0 200 OK\r\nA: B\r\n\r\n" - "RTSP/1.0 200 OK\r\nContent-Length: 2\r\n\r\nAB" - "RTSP/1.0 200 OK\r\nContent-Length: 0\r\n\r\n" + b"RTSP/1.0 200 OK\r\nA: B\r\n\r\n" + b"RTSP/1.0 200 OK\r\nContent-Length: 2\r\n\r\nAB" + b"RTSP/1.0 200 OK\r\nContent-Length: 0\r\n\r\n" ) assert resp.headers["A"] == "B" assert resp.body == "" @@ -61,4 +72,13 @@ def test_parse_sequent_messages(): resp, rest = parse_response(rest) assert resp.headers["Content-Length"] == "0" assert resp.body == "" - assert rest == "" + assert rest == b"" + + +def test_parse_raw_body(): + resp, rest = parse_response( + b"RTSP/1.0 200 OK\r\nContent-Length: 4\r\n" + + b"Content-Type: application/octet-stream\r\n\r\nbodyextra" + ) + assert resp.body == b"body" + assert rest == b"extra"
Add RSA encryption to support AirPort Express streaming

**What feature would you like?**

The AirPort Express requires encryption when streaming. This is well known and reverse engineered, so it shouldn't be that hard to implement. Just need to do it.

**Describe the solution you'd like**

No particular keys need to be provided by the user; the private and public keys used for the crypto need to be bundled. Otherwise this should be transparent (other than that the check for supported cryptos needs to be changed).

**Any other information to share?**

Relates to #1059
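As a rough illustration of the direction the fix eventually took (per the patch above, an `/auth-setup` handshake rather than full RSA), here is a minimal sketch of building the 33-byte auth-setup body: one encryption-type byte (0x01 = unencrypted) followed by a 32-byte Curve25519 public key. The zeroed key below is a placeholder, not the static key pyatv bundles:

```python
AUTH_SETUP_UNENCRYPTED = b"\x01"  # 0x01 = proceed unencrypted
PLACEHOLDER_CURVE25519_PUB_KEY = bytes(32)  # placeholder, not a real key


def auth_setup_body() -> bytes:
    """Build the 33-byte body sent with POST /auth-setup."""
    body = AUTH_SETUP_UNENCRYPTED + PLACEHOLDER_CURVE25519_PUB_KEY
    assert len(body) == 1 + 32
    return body
```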
0.0
311ca61ea8efd1ca69fe4e76983c30039af8cc45
[ "tests/raop/test_parsers.py::test_parse_audio_properties[properties0-44100-2-2]", "tests/raop/test_parsers.py::test_parse_audio_properties[properties1-22050-2-2]", "tests/raop/test_parsers.py::test_parse_audio_properties[properties2-44100-4-2]", "tests/raop/test_parsers.py::test_parse_audio_properties[properties3-44100-2-4]", "tests/raop/test_parsers.py::test_parse_invalid_audio_property_raises[properties0]", "tests/raop/test_parsers.py::test_parse_invalid_audio_property_raises[properties1]", "tests/raop/test_parsers.py::test_parse_invalid_audio_property_raises[properties2]", "tests/raop/test_parsers.py::test_parse_encryption_type[properties0-EncryptionType.Unencrypted]", "tests/raop/test_parsers.py::test_parse_encryption_type[properties1-EncryptionType.RSA]", "tests/raop/test_parsers.py::test_parse_encryption_type[properties2-EncryptionType.FairPlay]", "tests/raop/test_parsers.py::test_parse_encryption_type[properties3-EncryptionType.MFiSAP]", "tests/raop/test_parsers.py::test_parse_encryption_type[properties4-EncryptionType.FairPlaySAPv25]", "tests/raop/test_parsers.py::test_parse_encryption_type[properties5-EncryptionType.RSA|Unencrypted]", "tests/raop/test_parsers.py::test_parse_encryption_bad_types[properties0]", "tests/raop/test_parsers.py::test_parse_encryption_bad_types[properties1]", "tests/raop/test_parsers.py::test_parse_encryption_bad_types[properties2]", "tests/raop/test_parsers.py::test_parse_encryption_include_unknown_type", "tests/raop/test_parsers.py::test_parse_metadata_types[properties0-MetadataType.NotSupported]", "tests/raop/test_parsers.py::test_parse_metadata_types[properties1-MetadataType.Text]", "tests/raop/test_parsers.py::test_parse_metadata_types[properties2-MetadataType.Artwork]", "tests/raop/test_parsers.py::test_parse_metadata_types[properties3-MetadataType.Progress|Artwork|Text]", "tests/raop/test_rtsp.py::test_parse_ok_first_line", "tests/raop/test_rtsp.py::test_parse_missing_ending", "tests/raop/test_rtsp.py::test_parse_headers", "tests/raop/test_rtsp.py::test_parse_body", "tests/raop/test_rtsp.py::test_parse_too_no_body", "tests/raop/test_rtsp.py::test_parse_too_short_body", "tests/raop/test_rtsp.py::test_parse_body_excessive_data", "tests/raop/test_rtsp.py::test_parse_sequent_messages", "tests/raop/test_rtsp.py::test_parse_raw_body" ]
[]
{ "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-05-20 17:46:38+00:00
mit
4,631
postlund__pyatv-1099
diff --git a/docs/documentation/supported_features.md b/docs/documentation/supported_features.md index a9203a47..102c27ef 100644 --- a/docs/documentation/supported_features.md +++ b/docs/documentation/supported_features.md @@ -196,7 +196,6 @@ AirTunes). * Devices requiring HAP pairing, e.g. Apple TV 4+, are not supported {% include issue no="1078" %} * Track length is hardcoded to 3s for now * Remote control commands does not work, e.g. play or pause {% include issue no="1068" %} -* Retransmission of lost packets is not supported {% include issue no="1079" %} ### Verified Devices diff --git a/pyatv/raop/fifo.py b/pyatv/raop/fifo.py new file mode 100644 index 00000000..714d5d6b --- /dev/null +++ b/pyatv/raop/fifo.py @@ -0,0 +1,81 @@ +"""Simple FIFO for packets based on dict. + +This FIFO holds a certain number of elements as defined by upper_limit. Each item maps a +sequence number to a packet, allowing fast look up of a certain packet. The order is +defined by insertion order and *not* sequence number order. + +When upper limit is exceeded, the item that was *inserted* last, is removed. + +Example: +fifo = PacketFifo(2) +fifo[1] = 123 +fifo[2] = 456 +print(fifo[1], fifo[2]) +""" + +from typing import Dict, Iterator, MutableMapping, TypeVar + +T = TypeVar("T") + + +class PacketFifo(MutableMapping[int, T]): # pylint: disable=too-many-ancestors + """Implementation of simple packet FIFO.""" + + _items: Dict[int, T] + + def __init__(self, upper_limit: int) -> None: + """Initialize a new PacketFifo instance.""" + self._items = {} + self._upper_limit = upper_limit + + def clear(self): + """Remove all items in FIFO.""" + self._items.clear() + + def __len__(self) -> int: + """Return number of items in FIFO.""" + return len(self._items) + + def __setitem__(self, index: int, value: T): + """Add an items to FIFO.""" + if isinstance(index, int): + # Cannot add item with same index again + if index in self._items: + raise ValueError(f"{index} already in FIFO") + + # Remove oldest item if limit is exceeded + if len(self) + 1 > self._upper_limit: + del self._items[list(self._items.keys())[0]] + + self._items[index] = value + else: + raise TypeError("only int supported as key") + + def __delitem__(self, index: int) -> None: + """Remove item from FIFO.""" + raise NotImplementedError("removing items not supported") + + def __iter__(self) -> Iterator[int]: + """Iterate over indices in FIFO.""" + return self._items.__iter__() + + def __getitem__(self, index: int) -> T: + """Return value of an item.""" + if isinstance(index, int): + return self._items[index] + raise TypeError("only int supported as key") + + def __contains__(self, index: object) -> bool: + """Return if an element exists in FIFO.""" + return index in self._items + + def __str__(self) -> str: + """Return string representation of FIFO. + + Only index numbers are returned in the string. 
+ """ + return str(list(self._items.keys())) + + def __repr__(self) -> str: + """Return internal representation as string of FIFO.""" + return repr(self._items) diff --git a/pyatv/raop/packets.py b/pyatv/raop/packets.py index 8706b633..9b977bd0 100644 --- a/pyatv/raop/packets.py +++ b/pyatv/raop/packets.py @@ -55,3 +55,7 @@ AudioPacketHeader = RtpHeader.extend( timestamp="I", ssrc="I", ) + +RetransmitReqeust = RtpHeader.extend( + "RetransmitPacket", lost_seqno="H", lost_packets="H" +) diff --git a/pyatv/raop/raop.py b/pyatv/raop/raop.py index 5b00d0e2..a58a1d80 100644 --- a/pyatv/raop/raop.py +++ b/pyatv/raop/raop.py @@ -11,8 +11,14 @@ from bitarray import bitarray from pyatv import exceptions from pyatv.raop import timing +from pyatv.raop.fifo import PacketFifo from pyatv.raop.metadata import EMPTY_METADATA, AudioMetadata -from pyatv.raop.packets import AudioPacketHeader, SyncPacket, TimingPacket +from pyatv.raop.packets import ( + AudioPacketHeader, + RetransmitReqeust, + SyncPacket, + TimingPacket, +) from pyatv.raop.parsers import ( EncryptionType, MetadataType, @@ -28,6 +34,9 @@ _LOGGER = logging.getLogger(__name__) # When being late, compensate by sending at most these many packets to catch up MAX_PACKETS_COMPENSATE = 3 +# We should store this many packets in case retransmission is requested +PACKET_BACKLOG_SIZE = 1000 + KEEP_ALIVE_INTERVAL = 25 # Seconds # Metadata used when no metadata is present @@ -41,10 +50,11 @@ SUPPORTED_ENCRYPTIONS = EncryptionType.Unencrypted | EncryptionType.MFiSAP class ControlClient(asyncio.Protocol): """Control client responsible for e.g. sync packets.""" - def __init__(self, context: RtspContext): + def __init__(self, context: RtspContext, packet_backlog: PacketFifo): """Initialize a new ControlClient.""" self.transport = None self.context = context + self.packet_backlog = packet_backlog self.task: Optional[asyncio.Future] = None def close(self): @@ -118,10 +128,31 @@ class ControlClient(asyncio.Protocol): """Handle that connection succeeded.""" self.transport = transport - @staticmethod - def datagram_received(data, addr): + def datagram_received(self, data, addr): """Handle incoming control data.""" - _LOGGER.debug("Received control data from %s: %s", addr, data) + actual_type = data[1] & 0x7F # Remove marker bit + + if actual_type == 0x55: + self._retransmit_lost_packets(RetransmitReqeust.decode(data), addr) + else: + _LOGGER.debug("Received unhandled control data from %s: %s", addr, data) + + def _retransmit_lost_packets(self, request: RetransmitReqeust, addr): + _LOGGER.debug("%s from %s", request, addr) + + for i in range(request.lost_packets): + if request.lost_seqno + i in self.packet_backlog: + packet = self.packet_backlog[request.lost_seqno + i] + + # Very "low level" here just because it's simple and avoids + # unnecessary conversions + original_seqno = packet[2:4] + resp = b"\x80\xD6" + original_seqno + packet + + if self.transport: + self.transport.sendto(resp, addr) + else: + _LOGGER.debug("Packet %d not in backlog", request.lost_seqno + 1) @staticmethod def error_received(exc): @@ -244,6 +275,7 @@ class RaopClient: self.context: RtspContext = context self.control_client: Optional[ControlClient] = None self.timing_client: Optional[TimingClient] = None + self._packet_backlog: PacketFifo = PacketFifo(PACKET_BACKLOG_SIZE) self._encryption_types: EncryptionType = EncryptionType.Unknown self._metadata_types: MetadataType = MetadataType.NotSupported self._metadata: AudioMetadata = EMPTY_METADATA @@ -316,7 +348,8 @@ class RaopClient: 
self._update_output_properties(properties) (_, control_client) = await self.loop.create_datagram_endpoint( - lambda: ControlClient(self.context), local_addr=(self.rtsp.local_ip, 0) + lambda: ControlClient(self.context, self._packet_backlog), + local_addr=(self.rtsp.local_ip, 0), ) (_, timing_client) = await self.loop.create_datagram_endpoint( TimingClient, local_addr=(self.rtsp.local_ip, 0) @@ -422,6 +455,7 @@ class RaopClient: except Exception as ex: raise exceptions.ProtocolError("an error occurred during streaming") from ex finally: + self._packet_backlog.clear() # Don't keep old packets around (big!) if transport: transport.close() if self._keep_alive_task: @@ -518,16 +552,21 @@ class RaopClient: for i in range(0, len(frames), 2): audio.frombytes(bytes([frames[i + 1], frames[i]])) + if transport.is_closing(): + _LOGGER.warning("Connection closed while streaming audio") + return 0 + + packet = header + audio.tobytes() + + # Add packet to backlog before sending + self._packet_backlog[self.context.rtpseq] = packet + transport.sendto(packet) + self.context.rtpseq = (self.context.rtpseq + 1) % (2 ** 16) self.context.head_ts += int( len(frames) / (self.context.channels * self.context.bytes_per_channel) ) - if transport.is_closing(): - _LOGGER.warning("Connection closed while streaming audio") - return 0 - - transport.sendto(header + audio.tobytes()) return int( len(frames) / (self.context.channels * self.context.bytes_per_channel) )
postlund/pyatv
5f5ad5b669fa72c0bfbb1ee99d8dd2acee0fd2eb
diff --git a/tests/raop/test_fifo.py b/tests/raop/test_fifo.py new file mode 100644 index 00000000..c0d6f072 --- /dev/null +++ b/tests/raop/test_fifo.py @@ -0,0 +1,126 @@ +"""Unit tests for pyatv.raop.fifo.""" + +import pytest + +from pyatv.raop.fifo import PacketFifo + + +def test_add_to_fifo(): + fifo = PacketFifo(10) + assert not fifo + + fifo[123] = 456 + assert len(fifo) == 1 + assert fifo + + +def test_add_existing_to_fifo(): + fifo = PacketFifo(10) + fifo[123] = 456 + + with pytest.raises(ValueError): + fifo[123] = 789 + + +def test_add_index_not_int_raises(): + fifo = PacketFifo(10) + + with pytest.raises(TypeError): + fifo["test"] = 123 + + +def test_get_missing_from_fifo(): + fifo = PacketFifo(10) + + with pytest.raises(KeyError): + fifo[123] + + +def test_get_index_not_int_raises(): + fifo = PacketFifo(10) + + with pytest.raises(TypeError): + fifo["test"] + + +def test_in_operator(): + fifo = PacketFifo(10) + assert 1 not in fifo + + fifo[1] = 0 + assert 1 in fifo + + +def test_add_multiple(): + fifo = PacketFifo(10) + fifo[0] = 1 + fifo[1] = 2 + fifo[2] = 3 + assert fifo[0] == 1 + assert fifo[1] == 2 + assert fifo[2] == 3 + assert len(fifo) == 3 + assert fifo + + +def test_add_overflow_removes_oldest(): + fifo = PacketFifo(2) + fifo[0] = 1 + fifo[1] = 2 + + fifo[2] = 3 + assert len(fifo) == 2 + assert 0 not in fifo + assert fifo[1] == 2 + assert fifo[2] == 3 + + fifo[3] = 4 + assert len(fifo) == 2 + assert 1 not in fifo + assert fifo[2] == 3 + assert fifo[3] == 4 + + +def test_clear_fifo(): + fifo = PacketFifo(2) + fifo[0] = 1 + fifo[1] = 2 + fifo.clear() + assert len(fifo) == 0 + + +def test_del_not_supported(): + fifo = PacketFifo(2) + fifo[0] = 1 + + with pytest.raises(NotImplementedError): + del fifo[0] + + +def test_iter_over_indices(): + fifo = PacketFifo(10) + fifo[1] = 1 + fifo[2] = 2 + fifo[3] = 2 + + sum = 0 + for i in fifo: + sum += i + + assert sum == (1 + 2 + 3) + + +def test_str(): + fifo = PacketFifo(2) + assert str(fifo) == "[]" + fifo[1] = 2 + fifo[2] = 3 + assert str(fifo) == "[1, 2]" + + +def test_repr(): + fifo = PacketFifo(2) + assert repr(fifo) == "{}" + fifo[1] = 2 + fifo[2] = 3 + assert repr(fifo) == "{1: 2, 2: 3}"
Support retransmission of lost packets when streaming

**Short feature/function description**

An AirPlay receiver can request old packets to be retransmitted in case they are missing. According to [here](https://git.zx2c4.com/Airtunes2/about/#constants), 1000 packets shall be buffered. A FIFO is assumed. Retransmission requests are made on the control channel.

**What needs to be done?**

* Create a FIFO (ring buffer) for all packets and attach the sequence number (see the sketch after this section)
* Handle retransmission requests on the control channel

**Is this a breaking change?**

No

**Anything else worth knowing?**

Assuming two channels, a sample size of 16 bits and 352 frames per packet, this will consume a fair bit of memory: 2 * 2 * 352 * 1000 = 1,408,000 bytes ≈ 1.4 MB.
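A minimal sketch of the bounded packet backlog described above (not the exact `PacketFifo` the patch adds): a dict keyed by RTP sequence number that evicts the oldest inserted entry once the limit is exceeded, so a retransmission request can look up a lost packet quickly:

```python
from typing import Dict


class PacketBacklog:
    """Keep the last `limit` sent packets, keyed by sequence number."""

    def __init__(self, limit: int = 1000) -> None:
        self._items: Dict[int, bytes] = {}
        self._limit = limit

    def add(self, seqno: int, packet: bytes) -> None:
        if len(self._items) >= self._limit:
            # Python dicts preserve insertion order: the first key is oldest
            del self._items[next(iter(self._items))]
        self._items[seqno] = packet

    def get(self, seqno: int) -> bytes:
        return self._items[seqno]  # KeyError if no longer buffered


# Memory estimate from the issue: 2 channels * 2 bytes per sample
# * 352 frames per packet * 1000 packets = 1,408,000 bytes (~1.4 MB)
```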
0.0
5f5ad5b669fa72c0bfbb1ee99d8dd2acee0fd2eb
[ "tests/raop/test_fifo.py::test_add_to_fifo", "tests/raop/test_fifo.py::test_add_existing_to_fifo", "tests/raop/test_fifo.py::test_add_index_not_int_raises", "tests/raop/test_fifo.py::test_get_missing_from_fifo", "tests/raop/test_fifo.py::test_get_index_not_int_raises", "tests/raop/test_fifo.py::test_in_operator", "tests/raop/test_fifo.py::test_add_multiple", "tests/raop/test_fifo.py::test_add_overflow_removes_oldest", "tests/raop/test_fifo.py::test_clear_fifo", "tests/raop/test_fifo.py::test_del_not_supported", "tests/raop/test_fifo.py::test_iter_over_indices", "tests/raop/test_fifo.py::test_str", "tests/raop/test_fifo.py::test_repr" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-05-20 19:43:20+00:00
mit
4,632
postlund__pyatv-1310
diff --git a/pyatv/core/facade.py b/pyatv/core/facade.py index 1a531d7a..17ad213d 100644 --- a/pyatv/core/facade.py +++ b/pyatv/core/facade.py @@ -287,9 +287,10 @@ class FacadePower(Relayer, interface.Power, interface.PowerListener): class FacadeStream(Relayer, interface.Stream): # pylint: disable=too-few-public-methods """Facade implementation for stream functionality.""" - def __init__(self): + def __init__(self, features: interface.Features): """Initialize a new FacadeStream instance.""" super().__init__(interface.Stream, DEFAULT_PRIORITIES) + self._features = features def close(self) -> None: """Close connection and release allocated resources.""" @@ -297,6 +298,9 @@ class FacadeStream(Relayer, interface.Stream): # pylint: disable=too-few-public async def play_url(self, url: str, **kwargs) -> None: """Play media from an URL on the device.""" + if not self._features.in_state(FeatureState.Available, FeatureName.PlayUrl): + raise exceptions.NotSupportedError("play_url is not supported") + await self.relay("play_url")(url, **kwargs) async def stream_file(self, file: Union[str, io.BufferedReader], **kwargs) -> None: @@ -376,7 +380,7 @@ class FacadeAppleTV(interface.AppleTV): interface.Metadata: FacadeMetadata(), interface.Power: FacadePower(), interface.PushUpdater: self._push_updates, - interface.Stream: FacadeStream(), + interface.Stream: FacadeStream(self._features), interface.Apps: FacadeApps(), interface.Audio: FacadeAudio(), } diff --git a/pyatv/protocols/airplay/__init__.py b/pyatv/protocols/airplay/__init__.py index a4fdbf9b..ceab13d6 100644 --- a/pyatv/protocols/airplay/__init__.py +++ b/pyatv/protocols/airplay/__init__.py @@ -25,6 +25,7 @@ from pyatv.interface import ( from pyatv.protocols import mrp from pyatv.protocols.airplay import remote_control from pyatv.protocols.airplay.auth import extract_credentials, verify_connection +from pyatv.protocols.airplay.features import AirPlayFlags, parse from pyatv.protocols.airplay.mrp_connection import AirPlayMrpConnection from pyatv.protocols.airplay.pairing import ( AirPlayPairingHandler, @@ -47,11 +48,14 @@ class AirPlayFeatures(Features): def __init__(self, service: conf.AirPlayService) -> None: """Initialize a new AirPlayFeatures instance.""" self.service = service + self._features = parse(self.service.properties.get("features", "0x0")) def get_feature(self, feature_name: FeatureName) -> FeatureInfo: """Return current state of a feature.""" - has_credentials = self.service.credentials - if feature_name == FeatureName.PlayUrl and has_credentials: + if feature_name == FeatureName.PlayUrl and ( + AirPlayFlags.SupportsAirPlayVideoV1 in self._features + or AirPlayFlags.SupportsAirPlayVideoV2 in self._features + ): return FeatureInfo(FeatureState.Available) return FeatureInfo(FeatureState.Unavailable) diff --git a/pyatv/protocols/airplay/auth/__init__.py b/pyatv/protocols/airplay/auth/__init__.py index e497c960..cc1fe4af 100644 --- a/pyatv/protocols/airplay/auth/__init__.py +++ b/pyatv/protocols/airplay/auth/__init__.py @@ -122,8 +122,8 @@ def extract_credentials(service: BaseService) -> HapCredentials: features = ft.parse(service.properties.get("features", "0x0")) if ( - ft.AirPlayFeatures.SupportsSystemPairing in features - or ft.AirPlayFeatures.SupportsCoreUtilsPairingAndEncryption in features + ft.AirPlayFlags.SupportsSystemPairing in features + or ft.AirPlayFlags.SupportsCoreUtilsPairingAndEncryption in features ): return TRANSIENT_CREDENTIALS diff --git a/pyatv/protocols/airplay/features.py b/pyatv/protocols/airplay/features.py index 
743f8228..4b23791b 100644 --- a/pyatv/protocols/airplay/features.py +++ b/pyatv/protocols/airplay/features.py @@ -5,7 +5,7 @@ import re # pylint: disable=invalid-name -class AirPlayFeatures(IntFlag): +class AirPlayFlags(IntFlag): """Features supported by AirPlay.""" SupportsAirPlayVideoV1 = 1 << 0 @@ -54,7 +54,7 @@ class AirPlayFeatures(IntFlag): # pylint: enable=invalid-name -def parse(features: str) -> AirPlayFeatures: +def parse(features: str) -> AirPlayFlags: """Parse an AirPlay feature string and return what is supported. A feature string have one of the following formats: @@ -68,4 +68,4 @@ def parse(features: str) -> AirPlayFeatures: value, upper = match.groups() if upper is not None: value = upper + value - return AirPlayFeatures(int(value, 16)) + return AirPlayFlags(int(value, 16)) diff --git a/pyatv/protocols/airplay/pairing.py b/pyatv/protocols/airplay/pairing.py index 2c2d50bc..3b02eb8d 100644 --- a/pyatv/protocols/airplay/pairing.py +++ b/pyatv/protocols/airplay/pairing.py @@ -6,7 +6,7 @@ from pyatv import conf, exceptions from pyatv.auth.hap_pairing import PairSetupProcedure from pyatv.interface import BaseService, PairingHandler from pyatv.protocols.airplay.auth import AuthenticationType, pair_setup -from pyatv.protocols.airplay.features import AirPlayFeatures, parse +from pyatv.protocols.airplay.features import AirPlayFlags, parse from pyatv.support import error_handler from pyatv.support.http import ClientSessionManager, HttpConnection, http_connect @@ -18,7 +18,7 @@ def get_preferred_auth_type(service: BaseService) -> AuthenticationType: features_string = service.properties.get("features") if features_string: features = parse(features_string) - if AirPlayFeatures.SupportsCoreUtilsPairingAndEncryption in features: + if AirPlayFlags.SupportsCoreUtilsPairingAndEncryption in features: return AuthenticationType.HAP return AuthenticationType.Legacy diff --git a/pyatv/protocols/raop/__init__.py b/pyatv/protocols/raop/__init__.py index 65b38b06..fffe3caf 100644 --- a/pyatv/protocols/raop/__init__.py +++ b/pyatv/protocols/raop/__init__.py @@ -506,7 +506,7 @@ def pair( raise exceptions.NotSupportedError("pairing not required") parsed = ap_features.parse(features) - if ap_features.AirPlayFeatures.SupportsLegacyPairing not in parsed: + if ap_features.AirPlayFlags.SupportsLegacyPairing not in parsed: raise exceptions.NotSupportedError("legacy pairing not supported") return AirPlayPairingHandler(
postlund/pyatv
93399541299595718a250c497ff88b0e2482fd88
diff --git a/tests/common_functional_tests.py b/tests/common_functional_tests.py index 8e015459..1cc143d2 100644 --- a/tests/common_functional_tests.py +++ b/tests/common_functional_tests.py @@ -518,9 +518,10 @@ class CommonFunctionalTests(AioHTTPTestCase): @unittest_run_loop async def test_features_play_url(self): - # TODO: The test always sets up AirPlay, so PlayUrl will always be available. - # In the future (after migrating to pytest fixtures), I will add a test where - # AirPlay is not available. + # TODO: As availability is based on zeroconf properties, this test just + # verifies that PlayUrl is available. It's hard to change zeroconf properties + # between test runs here, so better tests will be written when dedicated + # functional tests for AirPlay are written. self.assertFeatures(FeatureState.Available, FeatureName.PlayUrl) @unittest_run_loop diff --git a/tests/conftest.py b/tests/conftest.py index ac7921e5..e78414eb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -82,7 +82,7 @@ def stub_heartbeat_loop(request): async def session_manager(): session_manager = await create_session() yield session_manager - session_manager.close() + await session_manager.close() @pytest.fixture diff --git a/tests/core/test_facade.py b/tests/core/test_facade.py index 6bfe99f2..da7df953 100644 --- a/tests/core/test_facade.py +++ b/tests/core/test_facade.py @@ -21,8 +21,11 @@ from pyatv.interface import ( FeatureState, Power, PushUpdater, + Stream, ) +TEST_URL = "http://test" + pytestmark = pytest.mark.asyncio @@ -129,6 +132,14 @@ class DummyDeviceListener(DeviceListener): self.closed_calls += 1 +class DummyStream(Stream): + def __init__(self) -> None: + self.url = None + + async def play_url(self, url: str, **kwargs) -> None: + self.url = url + + @pytest.fixture(name="register_interface") def register_interface_fixture(facade_dummy): def _register_func(feature: FeatureName, instance, protocol: Protocol): @@ -415,3 +426,27 @@ async def tests_device_info_from_multiple_protocols(facade_dummy, register_inter assert dev_info.operating_system == OperatingSystem.TvOS assert dev_info.version == "1.0" assert dev_info.build_number == "ABC" + + +async def test_stream_play_url_not_available(facade_dummy, register_interface): + stream, _ = register_interface(FeatureName.Volume, DummyStream(), Protocol.RAOP) + + await facade_dummy.connect() + + with pytest.raises(exceptions.NotSupportedError): + await facade_dummy.stream.play_url(TEST_URL) + + +async def test_stream_play_url_available(facade_dummy, register_interface): + stream, _ = register_interface(FeatureName.PlayUrl, DummyStream(), Protocol.RAOP) + + # play_url requires FeatureName.PlayUrl to be available, so add the feature interface + register_interface( + FeatureName.PlayUrl, DummyFeatures(FeatureName.PlayUrl), Protocol.DMAP + ) + + await facade_dummy.connect() + + await facade_dummy.stream.play_url(TEST_URL) + + assert stream.url == TEST_URL diff --git a/tests/fake_udns.py b/tests/fake_udns.py index bf5e1c43..7074b2c8 100644 --- a/tests/fake_udns.py +++ b/tests/fake_udns.py @@ -58,7 +58,7 @@ def airplay_service( name=atv_name, addresses=addresses, port=port, - properties={"deviceid": deviceid.encode("utf-8")}, + properties={"deviceid": deviceid.encode("utf-8"), "features": b"0x1"}, model=model, ) return ("_airplay._tcp.local", service) diff --git a/tests/protocols/airplay/test_airplay_interface.py b/tests/protocols/airplay/test_airplay_interface.py new file mode 100644 index 00000000..814ff899 --- /dev/null +++ 
b/tests/protocols/airplay/test_airplay_interface.py @@ -0,0 +1,23 @@ +"""Unit tests for interface implementations in pyatv.protocols.airplay.""" + +import pytest + +from pyatv.conf import AirPlayService +from pyatv.const import FeatureName, FeatureState +from pyatv.protocols.airplay import AirPlayFeatures + +# AirPlayFeatures + + [email protected]( + "flags,expected_state", + [ + ("0x0,0x0", FeatureState.Unavailable), + ("0x1,0x0", FeatureState.Available), # VideoV1 + ("0x00000000,0x20000", FeatureState.Available), # VideoV2 + ], +) +def test_feature_play_url(flags, expected_state): + service = AirPlayService("id", properties={"features": flags}) + features = AirPlayFeatures(service) + assert features.get_feature(FeatureName.PlayUrl).state == expected_state diff --git a/tests/protocols/airplay/test_features.py b/tests/protocols/airplay/test_features.py index 2f8f554f..921d6634 100644 --- a/tests/protocols/airplay/test_features.py +++ b/tests/protocols/airplay/test_features.py @@ -1,26 +1,26 @@ """Unit tests for pyatv.protocols.airplay.features.""" import pytest -from pyatv.protocols.airplay.features import AirPlayFeatures, parse +from pyatv.protocols.airplay.features import AirPlayFlags, parse @pytest.mark.parametrize( "flags,output", [ # Single feature flag - ("0x00000001", AirPlayFeatures.SupportsAirPlayVideoV1), + ("0x00000001", AirPlayFlags.SupportsAirPlayVideoV1), ( "0x40000003", - AirPlayFeatures.HasUnifiedAdvertiserInfo - | AirPlayFeatures.SupportsAirPlayPhoto - | AirPlayFeatures.SupportsAirPlayVideoV1, + AirPlayFlags.HasUnifiedAdvertiserInfo + | AirPlayFlags.SupportsAirPlayPhoto + | AirPlayFlags.SupportsAirPlayVideoV1, ), # Dual feature flag ( "0x00000003,0x00000001", - AirPlayFeatures.IsCarPlay - | AirPlayFeatures.SupportsAirPlayPhoto - | AirPlayFeatures.SupportsAirPlayVideoV1, + AirPlayFlags.IsCarPlay + | AirPlayFlags.SupportsAirPlayPhoto + | AirPlayFlags.SupportsAirPlayVideoV1, ), ], ) diff --git a/tests/protocols/dmap/test_dmap_functional.py b/tests/protocols/dmap/test_dmap_functional.py index a1f7d878..8afc3489 100644 --- a/tests/protocols/dmap/test_dmap_functional.py +++ b/tests/protocols/dmap/test_dmap_functional.py @@ -77,7 +77,10 @@ class DMAPFunctionalTest(common_functional_tests.CommonFunctionalTests): async def get_connected_device(self, hsgid): self.dmap_service = DmapService("dmapid", hsgid, port=self.server.port) self.airplay_service = AirPlayService( - "airplay_id", self.server.port, DEVICE_CREDENTIALS + "airplay_id", + self.server.port, + DEVICE_CREDENTIALS, + properties={"features": "0x1"}, # AirPlayVideoV1 supported ) self.conf = AppleTV(ipaddress.IPv4Address("127.0.0.1"), "Apple TV") self.conf.add_service(self.dmap_service) diff --git a/tests/protocols/mrp/test_mrp_functional.py b/tests/protocols/mrp/test_mrp_functional.py index 13f620c7..c639912b 100644 --- a/tests/protocols/mrp/test_mrp_functional.py +++ b/tests/protocols/mrp/test_mrp_functional.py @@ -54,7 +54,12 @@ class MRPFunctionalTest(common_functional_tests.CommonFunctionalTests): MrpService("mrp_id", self.fake_atv.get_port(Protocol.MRP)) ) self.conf.add_service( - AirPlayService("airplay_id", self.server.port, DEVICE_CREDENTIALS) + AirPlayService( + "airplay_id", + self.server.port, + DEVICE_CREDENTIALS, + properties={"features": "0x1"}, # AirPlayVideoV1 supported + ) ) self.atv = await self.get_connected_device()
Enhance feature detection of play_url and improve error handling

### What feature would you like?

Feature availability of `play_url` is solely based on whether credentials are available or not. This is generally the case, as credentials are needed for AirPlay on the Apple TV (which supports `play_url`) and not on the HomePod or other AirPlay receivers (which do *not* support `play_url`). An edge case is older Apple TV devices with device verification turned off, where it will be reported as unsupported even though it is supported.

### Describe the solution you'd like

A better way is to check if `SupportsAirPlayVideoV1` or `SupportsAirPlayVideoV2` is present in the AirPlay feature flags. An additional enhancement is to check availability of that feature in `play_url` and raise an exception in case it is not. This will make it clearer to users calling that function and expecting it to work on the HomePod. Preferably in the facade.

### Any other information to share?

This should be included in 0.9.0 as I find it important.
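A minimal sketch of the proposed check, assuming `IntFlag`-based feature flags. `SupportsAirPlayVideoV1 = 1 << 0` matches the patch above; the bit position for `SupportsAirPlayVideoV2` is inferred from the test vector `"0x00000000,0x20000"` (0x20000 in the upper 32-bit word) and may differ from pyatv's actual definition:

```python
from enum import IntFlag


class AirPlayFlags(IntFlag):
    SupportsAirPlayVideoV1 = 1 << 0
    SupportsAirPlayVideoV2 = 1 << 49  # inferred: upper word 0x20000 (bit 17 + 32)


def play_url_available(features: AirPlayFlags) -> bool:
    """PlayUrl is available only when video streaming is advertised."""
    return bool(
        features
        & (AirPlayFlags.SupportsAirPlayVideoV1 | AirPlayFlags.SupportsAirPlayVideoV2)
    )
```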
0.0
93399541299595718a250c497ff88b0e2482fd88
[ "tests/core/test_facade.py::test_connect_with_no_protocol", "tests/core/test_facade.py::test_connect_again_raises", "tests/core/test_facade.py::test_add_after_connect_raises", "tests/core/test_facade.py::test_interface_not_exposed_prior_to_connect", "tests/core/test_facade.py::test_features_multi_instances", "tests/core/test_facade.py::test_ignore_already_connected_protocol", "tests/core/test_facade.py::test_features_feature_overlap_uses_priority", "tests/core/test_facade.py::test_features_push_updates", "tests/core/test_facade.py::test_power_prefer_companion[FeatureName.TurnOn-turn_on]", "tests/core/test_facade.py::test_power_prefer_companion[FeatureName.TurnOff-turn_off]", "tests/core/test_facade.py::test_audio_get_volume_out_of_range[-0.1]", "tests/core/test_facade.py::test_audio_get_volume_out_of_range[100.1]", "tests/core/test_facade.py::test_audio_set_volume_out_of_range[-0.1]", "tests/core/test_facade.py::test_audio_set_volume_out_of_range[100.1]", "tests/core/test_facade.py::tests_device_info_from_single_protocol", "tests/core/test_facade.py::tests_device_info_from_multiple_protocols", "tests/core/test_facade.py::test_stream_play_url_not_available", "tests/core/test_facade.py::test_stream_play_url_available", "tests/protocols/airplay/test_airplay_interface.py::test_feature_play_url[0x0,0x0-FeatureState.Unavailable]", "tests/protocols/airplay/test_airplay_interface.py::test_feature_play_url[0x1,0x0-FeatureState.Available]", "tests/protocols/airplay/test_airplay_interface.py::test_feature_play_url[0x00000000,0x20000-FeatureState.Available]", "tests/protocols/airplay/test_features.py::test_parse_features[0x00000001-AirPlayFlags.SupportsAirPlayVideoV1]", "tests/protocols/airplay/test_features.py::test_parse_features[0x40000003-AirPlayFlags.HasUnifiedAdvertiserInfo|SupportsAirPlayPhoto|SupportsAirPlayVideoV1]", "tests/protocols/airplay/test_features.py::test_parse_features[0x00000003,0x00000001-AirPlayFlags.IsCarPlay|SupportsAirPlayPhoto|SupportsAirPlayVideoV1]", "tests/protocols/airplay/test_features.py::test_bad_input[foo]", "tests/protocols/airplay/test_features.py::test_bad_input[1234]", "tests/protocols/airplay/test_features.py::test_bad_input[0x00000001,]", "tests/protocols/airplay/test_features.py::test_bad_input[,0x00000001]", "tests/protocols/airplay/test_features.py::test_bad_input[0x00000001,0x00000001,0x00000001]", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_always_available_features", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_always_unknown_features", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_app_not_supported", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_audio_volume_controls", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_basic_device_info", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_down", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_left", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_menu", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_next", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_pause", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_play", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_play_pause", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_previous", 
"tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_right", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_select", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_stop", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_top_menu", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_unsupported_raises", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_up", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_volume_down", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_button_volume_up", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_close_connection", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_connect_failed", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_connect_missing_device_id", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_connect_no_service", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_connection_lost", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_features_play_url", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_features_shuffle_repeat", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_features_when_playing", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_invalid_airplay_credentials_format", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_invalid_credentials_format", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_login_with_pairing_guid_succeed", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_artwork", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_artwork_cache", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_artwork_none_if_not_available", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_artwork_size", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_device_id", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_loading", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_music_paused", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_music_playing", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_none_type_when_not_playing", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_seeking", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_video_paused", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_metadata_video_playing", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_pair_missing_service", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_play_local_file", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_play_url", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_play_url_authenticated", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_play_url_no_service", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_play_url_not_authenticated_error", 
"tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_playing_immutable", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_push_updater_active", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_push_updates", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_relogin_if_session_expired", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_repeat_state", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_reset_revision_if_push_updates_fail", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_seek_in_playing_media", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_set_repeat", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_set_shuffle_albums", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_set_shuffle_common", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_shuffle_state_albums", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_shuffle_state_common", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_skip_forward_backward", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_unsupported_features", "tests/protocols/dmap/test_dmap_functional.py::DMAPFunctionalTest::test_volume_controls", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_always_available_features", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_audio_volume_controls", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_audio_volume_down_below_zero", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_audio_volume_down_decreases_volume", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_audio_volume_up_above_max", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_audio_volume_up_increases_volume", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_basic_device_info", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_down", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_down_actions", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_home", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_home_hold", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_left", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_left_actions", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_menu", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_menu_actions", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_next", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_pause", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_play", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_play_pause", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_previous", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_right", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_right_actions", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_select", 
"tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_select_actions", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_stop", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_suspend", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_top_menu", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_up", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_up_actions", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_volume_down", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_volume_up", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_wakeup", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_close_connection", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_connect_missing_device_id", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_connect_no_service", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_artwork", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_play_url", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_when_playing", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_with_supported_commands", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_invalid_airplay_credentials_format", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_invalid_credentials_format", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_item_id_hash", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_item_updates", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_cache", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_erroneously_available", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_id", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_id_no_identifier", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_none_if_not_available", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_width_and_height", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_device_id", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_loading", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_music_paused", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_music_playing", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_none_type_when_not_playing", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_playback_rate_device_state", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_seeking", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_tv_playing", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_video_paused", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_video_playing", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_pair_missing_service", 
"tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_local_file", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_pause_emulation", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url_authenticated", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url_not_authenticated_error", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_app", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_immutable", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_immutable_update_content_item", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_power_state", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_power_state_acknowledgement", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_push_updater_active", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_push_updates", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_repeat_state", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_seek_in_playing_media", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_default_commands", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_repeat", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_shuffle_albums", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_shuffle_common", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_shuffle_state_albums", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_shuffle_state_common", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_skip_forward_backward", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_update_client_before_setstate", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_volume_change", "tests/protocols/mrp/test_mrp_functional.py::MRPFunctionalTest::test_volume_controls" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
2021-09-15 20:01:29+00:00
mit
4,633
postlund__pyatv-923
diff --git a/pyatv/mrp/__init__.py b/pyatv/mrp/__init__.py index e03f8790..26f85ffb 100644 --- a/pyatv/mrp/__init__.py +++ b/pyatv/mrp/__init__.py @@ -110,6 +110,8 @@ _FIELD_FEATURES = { FeatureName.Position: "elapsedTimeTimestamp", } # type: Dict[FeatureName, str] +DELAY_BETWEEN_COMMANDS = 0.1 + def _cocoa_to_timestamp(time): delta = datetime.datetime(2001, 1, 1) - datetime.datetime(1970, 1, 1) @@ -527,6 +529,7 @@ class MrpPower(Power): async def turn_off(self, await_new_state: bool = False) -> None: """Turn device off.""" await self.remote.home(InputAction.Hold) + await asyncio.sleep(DELAY_BETWEEN_COMMANDS) await self.remote.select() if await_new_state and self.power_state != PowerState.Off:
postlund/pyatv
df5614971c295fcc8a82b642b6d53689c4756ac7
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c06a3e99..121c427b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -23,6 +23,8 @@ jobs: - 3.9 steps: - uses: actions/checkout@v2 + with: + fetch-depth: 2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: @@ -62,7 +64,7 @@ jobs: run: tox -q -p auto -e regression if: matrix.python-version == '3.8' && runner.os == 'Linux' - name: Upload coverage to Codecov - uses: codecov/[email protected] + uses: codecov/[email protected] with: token: ${{ secrets.CODECOV_TOKEN }} file: ./coverage.xml diff --git a/tests/mrp/test_mrp_functional.py b/tests/mrp/test_mrp_functional.py index 7cce3130..23f2493b 100644 --- a/tests/mrp/test_mrp_functional.py +++ b/tests/mrp/test_mrp_functional.py @@ -1,5 +1,6 @@ """Functional tests using the API with a fake Apple TV.""" +import math import logging from ipaddress import IPv4Address from aiohttp.test_utils import unittest_run_loop @@ -19,7 +20,7 @@ from pyatv.conf import AirPlayService, MrpService, AppleTV from pyatv.mrp.protobuf import CommandInfo_pb2 from tests import common_functional_tests -from tests.utils import until, faketime +from tests.utils import until, faketime, stub_sleep from tests.fake_device import FakeAppleTV from tests.fake_device.mrp import APP_NAME, PLAYER_IDENTIFIER from tests.fake_device.airplay import DEVICE_CREDENTIALS @@ -247,6 +248,7 @@ class MRPFunctionalTest(common_functional_tests.CommonFunctionalTests): # Check if power state changes after turn_off command await self.atv.power.turn_off() + assert math.isclose(stub_sleep(), 0.1) await until(lambda: self.atv.power.power_state == PowerState.Off) await until(lambda: listener.old_state == PowerState.On) await until(lambda: listener.new_state == PowerState.Off)
Since tvOS 14, turn_off SOMETIMES only opens the lateral menu, maybe just a timing issue... Since tvOS 14 (I cannot easily tell whether it was already the case with 14.0.1, or even 14.0.0, but with tvOS 13 this issue never came to light), the turn_off command I send using atvremote sometimes only opens the lateral menu and doesn't confirm the sleep command. A simple select command, or a second turn_off, does the job every time though. System Setup: - OS: Mac OS X Mojave 10.14.6 - Python: 3.8.3 - pyatv: 0.7.5 (using atvremote) - Apple TV: Apple TV 4 tvOS 14.0.2 build 18J411
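A sketch of the timing-based mitigation that the patch above applies: pause briefly between the home-hold that opens the power-off dialog and the select that confirms it. The `remote` object and its method names are stand-ins for MRP's remote control interface, not pyatv's real API:

```python
import asyncio

DELAY_BETWEEN_COMMANDS = 0.1  # seconds, matching the value used in the patch


async def turn_off(remote) -> None:
    """Open the power-off dialog, wait for it to render, then confirm."""
    await remote.home_hold()                     # brings up the lateral menu
    await asyncio.sleep(DELAY_BETWEEN_COMMANDS)  # give tvOS 14 time to draw it
    await remote.select()                        # confirm "Sleep"


class _DummyRemote:
    """Stand-in remote that just logs, so the sketch runs on its own."""

    async def home_hold(self):
        print("home (hold)")

    async def select(self):
        print("select")


asyncio.run(turn_off(_DummyRemote()))
```

The test in the accompanying test_patch verifies exactly this delay by stubbing `asyncio.sleep` and asserting the accumulated sleep time is close to 0.1 s.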
0.0
df5614971c295fcc8a82b642b6d53689c4756ac7
[ "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_power_state" ]
[ "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_always_available_features", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_basic_device_info", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_down", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_down_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_home", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_left", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_left_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_menu", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_menu_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_next", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_pause", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_play", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_play_pause", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_previous", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_right", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_right_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_select", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_select_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_stop", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_suspend", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_top_menu", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_up", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_up_actions", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_volume_down", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_volume_up", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_button_wakeup", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_close_connection", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_connect_invalid_protocol", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_connect_missing_device_id", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_artwork", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_play_url", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_when_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_features_with_supported_commands", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_invalid_airplay_credentials_format", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_invalid_credentials_format", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_item_id_hash", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_item_updates", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_cache", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_id", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_id_no_identifier", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_none_if_not_available", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_artwork_width_and_height", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_device_id", 
"tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_loading", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_music_paused", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_music_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_none_type_when_not_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_playback_rate_device_state", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_seeking", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_video_paused", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_metadata_video_playing", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_pair_missing_service", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_local_file", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_pause_emulation", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url_authenticated", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_play_url_not_authenticated_error", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_app", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_playing_immutable", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_power_state_acknowledgement", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_push_updater_active", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_push_updates", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_repeat_state", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_seek_in_playing_media", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_repeat", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_shuffle_albums", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_set_shuffle_common", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_shuffle_state_albums", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_shuffle_state_common", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_skip_forward_backward", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_update_client_before_setstate", "tests/mrp/test_mrp_functional.py::MRPFunctionalTest::test_volume_controls" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
2021-01-10 21:05:58+00:00
mit
4,634