repo (string, 7-60 chars) | instance_id (string, 11-64 chars) | base_commit (string, 40 chars) | patch (string, 237-114k chars) | test_patch (string, 1 distinct value) | problem_statement (string, 20-58k chars) | hints_text (string, 0-67.7k chars) | created_at (timestamp[ns], 2015-08-08 06:08:58 to 2024-12-12 22:07:22) | environment_setup_commit (string, 1 distinct value) | version (string, 1 distinct value) | FAIL_TO_PASS (sequence, length 0) | PASS_TO_PASS (sequence, length 0) |
---|---|---|---|---|---|---|---|---|---|---|---|
lukingroup/pylabnet | lukingroup__pylabnet-258 | 687798ef5220437b5c15d5997eff0f8bb07892b0 | diff --git a/pylabnet/scripts/pulsemaster/pulseblock_constructor.py b/pylabnet/scripts/pulsemaster/pulseblock_constructor.py
index 9f657aa4..eab016f0 100644
--- a/pylabnet/scripts/pulsemaster/pulseblock_constructor.py
+++ b/pylabnet/scripts/pulsemaster/pulseblock_constructor.py
@@ -24,6 +24,10 @@ def __init__(self, name, log, var_dict, config=None):
self.pulseblock = None
self.config = config
+ if "iq_cal_path" in self.config:
+ self.iq_calibration = IQ_Calibration(log=log)
+ self.iq_calibration.load_calibration(self.config["iq_cal_path"])
+
def default_placeholder_value(self, placeholder_name):
for key in Placeholder.default_values:
@@ -112,12 +116,9 @@ def compile_pulseblock(self):
# Handle IQ mixing case
if "iq" in arg_dict and arg_dict["iq"]:
- iq_calibration = IQ_Calibration()
- iq_calibration.load_calibration(self.config["iq_cal_path"])
-
(if_freq, lo_freq, phase_opt,
amp_i_opt, amp_q_opt,
- dc_i_opt, dc_q_opt) = iq_calibration.get_optimal_hdawg_and_LO_values(arg_dict["mod_freq"])
+ dc_i_opt, dc_q_opt) = self.iq_calibration.get_optimal_hdawg_and_LO_values(arg_dict["mod_freq"])
self.log.info(f"if={if_freq}, lo={lo_freq}, phase={phase_opt}")
@@ -149,6 +150,7 @@ def compile_pulseblock(self):
# Construct a pulse and add it to the pulseblock
# The iteration over arg_dict takes care of the IQ mixing case
+ # idx = 0 is the I portion, idx = 1 is the Q portion.
for idx, arg_dict in enumerate(arg_dict_list):
# Construct single pulse.
@@ -158,64 +160,65 @@ def compile_pulseblock(self):
pulse = None
self.log.warn(f"Found an unsupported pulse type {pb_spec.pulsetype}")
- # Store the duration of the first pulse (for IQ mixing) as the
- # pb duration is modified for the second pulse.
- if idx == 0:
- first_dur = pulse.dur
pb_dur = pulseblock.dur
+ prev_t0 = pulseblock.latest_t0
+ prev_dur = pulseblock.latest_dur
- # Insert pulse to correct position in pulseblock.
- if pb_spec.tref == "Absolute":
- pulseblock.append_po_as_pb(
- p_obj=pulse,
- offset=offset-pb_dur
- )
- elif pb_spec.tref == "After Last Pulse":
- if idx == 0:
+ # idx = 0 refers to the I pulse (or a normal non-IQ pulse)
+ if idx == 0:
+ # CASE 1
+ if pb_spec.tref == "Absolute":
+ pulseblock.append_po_as_pb(
+ p_obj=pulse,
+ offset=-pb_dur+offset
+ )
+
+ # CASE 2
+ elif pb_spec.tref in ("After Last Pulse", "At End of Sequence"): # For compatbility with previous naming
pulseblock.append_po_as_pb(
p_obj=pulse,
offset=offset
)
- # Force the 2nd pulse to start at same time as the first
- # pulse in an IQ mix pulse.
- else:
+
+ # CASE 3
+ elif pb_spec.tref in ("With Last Pulse", "With Previous Pulse"): # For compatbility with previous naming
+ # Take timing reference based on the last pulse's t0
pulseblock.append_po_as_pb(
p_obj=pulse,
- offset=-first_dur
+ offset=-pb_dur+prev_t0+offset
)
- elif pb_spec.tref == "After Last Pulse On Channel":
- # Get the end time of the last pulse on the ch
- ch = pb.Channel(name=arg_dict["ch"], is_analog=pulse.is_analog)
- if ch in pulseblock.p_dict.keys():
- last_pulse = pulseblock.p_dict[ch][-1]
- last_pulsetime = last_pulse.t0 + last_pulse.dur
- else:
- last_pulsetime = 0
- pulseblock.append_po_as_pb(
- p_obj=pulse,
- offset=last_pulsetime+offset-pb_dur
- )
- elif pb_spec.tref == "With Last Pulse":
- # Retrieve previous pulseblock:
- if i != 0:
- previous_pb_spec = self.pulse_specifiers[i-1]
- else:
- raise ValueError(
- "Cannot chose timing reference 'With Last Pulse' for first pulse in pulse-sequence."
+
+ # CASE 4
+ elif pb_spec.tref == "After Previous Pulse":
+ # Take timing reference based on the last pulse's t0 and duration
+ pulseblock.append_po_as_pb(
+ p_obj=pulse,
+ offset=-pb_dur+prev_t0+prev_dur+offset
)
- # Retrieve duration of previous pulseblock.
- prev_dur = self.resolve_value(previous_pb_spec.dur) * 1e-6
- if idx == 0:
+
+ # CASE 5
+ elif pb_spec.tref == "After Last Pulse On Channel":
+ # Get the end time of the last pulse on the ch
+ ch = pb.Channel(name=arg_dict["ch"], is_analog=pulse.is_analog)
+ if ch in pulseblock.p_dict.keys():
+ last_pulse = pulseblock.p_dict[ch][-1]
+ last_pulsetime = last_pulse.t0 + last_pulse.dur
+ else:
+ last_pulsetime = 0
+
pulseblock.append_po_as_pb(
p_obj=pulse,
- offset=-prev_dur+offset
+ offset=-pb_dur+last_pulsetime+offset
)
+
+ else:
+ # idx = 1 here (Q pulse)
# Force the 2nd pulse to start at same time as the first
- # pulse in an IQ mix pulse.
- else:
- pulseblock.append_po_as_pb(
+ # pulse in an IQ mix pulse. Note that prev_t0 is the t0 of
+ # the I pulse since this is executed right after the I pulse.
+ pulseblock.append_po_as_pb(
p_obj=pulse,
- offset=-first_dur
+ offset=-pb_dur+prev_t0
)
self.pulseblock = pulseblock
diff --git a/pylabnet/scripts/pulsemaster/pulsemaster.py b/pylabnet/scripts/pulsemaster/pulsemaster.py
index 007bd2db..29ba3a56 100644
--- a/pylabnet/scripts/pulsemaster/pulsemaster.py
+++ b/pylabnet/scripts/pulsemaster/pulsemaster.py
@@ -11,8 +11,7 @@
QFormLayout, QComboBox, QWidget, QTableWidgetItem, QVBoxLayout, \
QTableWidgetItem, QCompleter, QLabel, QLineEdit, QCheckBox, QGridLayout
from PyQt5.QtGui import QKeySequence
-from PyQt5.QtCore import QRect, Qt, QAbstractTableModel
-from PyQt5.QtCore import QVariant
+from PyQt5.QtCore import QRect, Qt, QAbstractTableModel, QTimer, QVariant
from simpleeval import simple_eval, NameNotDefined
@@ -145,6 +144,9 @@ def __init__(self, config, ui='pulsemaster', logger_client=None, server_port=Non
self.add_pb_popup = None
+ # Initialize timers for controlling when text boxes get updated
+ self.timers = []
+
# Initialize preserve_bits checkbox state in dictionary
self.update_preserve_bits()
@@ -157,6 +159,12 @@ def __init__(self, config, ui='pulsemaster', logger_client=None, server_port=Non
# Apply all custom styles
self.apply_custom_styles()
+ # Set the number of plotting points for the pulse preview window
+ if "plot_points" in self.config_dict:
+ self.plot_points = self.config_dict["plot_points"]
+ else:
+ self.plot_points = 800 # Default value
+
self.awg_running = False
def apply_custom_styles(self):
@@ -547,7 +555,7 @@ def prep_plotdata(self, pb_obj):
t1, t2 = new_t1, new_t2
# Draw the current pulse at high grid density
- t_ar = np.linspace(t1, t2, 2000)
+ t_ar = np.linspace(t1, t2, self.plot_points)
x_ar.extend(t_ar)
y_ar.extend(p_item.get_value(t_ar))
@@ -866,11 +874,11 @@ def update_pulse_form_field(self, pulse_specifier, pulse_specifier_field, field_
var_parent_field.setEnabled(True)
var_parent_field.setText("")
- # If the t0 term is variable, we must set to "after last pulse",
+ # If the t0 term is variable, we must set to "At End of Sequence",
# otherwise we have no idea when the pulse happens.
if field_var == "offset_var":
tref_field = widgets_dict["tref"]
- tref_field.setCurrentIndex(tref_field.findText("After Last Pulse"))
+ tref_field.setCurrentIndex(tref_field.findText("At End of Sequence"))
self.update_pulse_form_field(pulse_specifier, tref_field, "tref", widgets_dict, pulse_index)
# Store the updated value in parent
@@ -971,7 +979,12 @@ def get_pulse_specifier_form(self, pulse_specifier, pb_constructor, pulse_index)
elif type(field_input) is QLineEdit:
field_input.setText(str(value))
- field_input.textEdited.connect(pulse_mod_function)
+ # Create a timer to prevent the pulse update function from being called immediately
+ self.timers.append(QTimer())
+ self.timers[-1].setSingleShot(True)
+ self.timers[-1].setInterval(300)
+ self.timers[-1].timeout.connect(pulse_mod_function)
+ field_input.textEdited.connect(self.timers[-1].start)
elif type(field_input) is QCheckBox:
field_input.setChecked(bool(value))
@@ -1211,6 +1224,9 @@ def add_pulseblock_constructors_from_popup(self):
# Close popup
self.add_pb_popup.close()
+ # Update the plotting window (clears it)
+ self.plot_current_pulseblock()
+
def gen_pulse_specifier(self, pulsetype_dict, pulse_data_dict):
""" Generates instance of PulseSpecifier which contain full
information of pulse (Pulsetype, channel_number, pulsetype, pulse_parameters,
@@ -1344,7 +1360,7 @@ def clean_and_validate_pulsedict(self, pulsedict):
if key != "tref":
try:
# Try to resolve arithmetic expression containing variables.
- pulsedict[key] = simple_eval(val, names=self.vars)
+ simple_eval(val, names=self.vars)
except NameNotDefined:
typecast_error.append(key)
validated = False
@@ -1393,9 +1409,9 @@ def read_pulse_params_from_form(self):
return False, None
# Check that the specified channels for IQ are not in the same core
- if len(pulse_ch_list) > 1:
- # Subtract 1 to make 0-indexed
- ch_num_list = [(self.ch_assignment_dict[ch][1] - 1) for ch in pulse_ch_list]
+ # if len(pulse_ch_list) > 1:
+ # # Subtract 1 to make 0-indexed
+ # ch_num_list = [(self.ch_assignment_dict[ch][1] - 1) for ch in pulse_ch_list]
# Divide by 2 to see if same core (e.g. channels 0, 1 // 2 = 0)
# ch_num_list = [ch//2 for ch in ch_num_list]
diff --git a/pylabnet/utils/iq_upconversion/iq_calibration.py b/pylabnet/utils/iq_upconversion/iq_calibration.py
index 20f2c75e..96ba5e01 100644
--- a/pylabnet/utils/iq_upconversion/iq_calibration.py
+++ b/pylabnet/utils/iq_upconversion/iq_calibration.py
@@ -27,8 +27,9 @@
class IQ_Calibration():
- def __init__(self):
+ def __init__(self, log=None):
self.initialized = False
+ self.log = log
def load_calibration(self, filename):
self.initialized = True
@@ -283,7 +284,7 @@ def get_optimal_hdawg_values(self, if_freq, lo_freq):
if (not self.initialized):
raise ValueError("No calibration loaded!")
- #Computing the optimal I and Q amplitudes
+ # Computing the optimal I and Q amplitudes
q_opt, phase_opt = self.get_ampl_phase(if_freq, lo_freq)
amp_i_opt = 2 * q_opt / (1 + q_opt) * self.IF_volt
amp_q_opt = 2 * self.IF_volt / (1 + q_opt)
@@ -293,7 +294,7 @@ def get_optimal_hdawg_values(self, if_freq, lo_freq):
return phase_opt, amp_i_opt, amp_q_opt, dc_i_opt, dc_q_opt
def set_optimal_hdawg_and_LO_values(self, hd, mw_source, freq, HDAWG_ports=[3,4], oscillator=2):
- '''Finds optimnal IF and LO frequencies for given output frequency.
+ '''Finds optimal IF and LO frequencies for given output frequency.
Sets the optimal sine output values on the hdawg for the found IF
and LO frequencies. Will also set the HDAWG's sine frequency and LO
frequency to the correct value.'''
@@ -351,7 +352,7 @@ def get_optimal_hdawg_and_LO_values(self, freq):
for iff in if_f:
lof = freq-iff
- if lof > LO[0] and lof < LO[-1]:
+ if LO[0] < lof < LO[-1]:
hm1, h0, h1, h2, h3 = self.get_harmonic_powers(iff, lof)
fidelity.append(self.get_fidelity(hm1, h0, h1, h2, h3, iff))
else:
diff --git a/pylabnet/utils/pulseblock/pulse_block.py b/pylabnet/utils/pulseblock/pulse_block.py
index e7ee5d14..55c80184 100644
--- a/pylabnet/utils/pulseblock/pulse_block.py
+++ b/pylabnet/utils/pulseblock/pulse_block.py
@@ -2,8 +2,8 @@
import copy
class Channel:
- """ Class to represent a signal channel.
- """
+ """ Class to represent a signal channel.
+ """
def __init__(self, name, is_analog):
self.name = name
self.is_analog = is_analog
@@ -102,6 +102,8 @@ def __init__(self, p_obj_list=None, dflt_dict=None, name='', use_auto_dflt=True)
self.p_dict = dict()
self.dflt_dict = dict()
self.use_auto_dflt = use_auto_dflt
+ self.latest_t0 = 0
+ self.latest_dur = 0
if dflt_dict is not None:
self.dflt_dict = copy.deepcopy(dflt_dict)
@@ -207,7 +209,7 @@ def _insert(self, p_obj, cflct_er=True, use_auto_dflt=True):
)
)
-
+
# Check if the channel already exists with the samne name but a
# different type
for key in self.p_dict.keys():
@@ -237,7 +239,9 @@ def _insert(self, p_obj, cflct_er=True, use_auto_dflt=True):
if use_auto_dflt:
self.dflt_dict[ch] = p_obj.auto_default
-
+ # Update the latest values that have been added to the PB
+ self.latest_t0 = p_obj.t0
+ self.latest_dur = p_obj.dur
def insert(self, p_obj, cflct_er=True):
""" Insert a new Pulse object into PulseBlock
@@ -434,6 +438,10 @@ def insert_pb(self, pb_obj, t0=0, cflct_er=True):
pb_obj.dflt_dict[ch]
)
+ # Update the latest values that have been added to the PB
+ self.latest_t0 = t0
+ self.latest_dur = pb_obj.dur
+
def join_pb(self, pb_obj, t0=0, cflct_er=True, name=''):
""" Same as insert_pb(), but instead of modifying self,
a new PulseBlock is created. Self is not altered.
| Clear pulse preview window when new pb is created
| 2021-06-29T00:07:29 | 0.0 | [] | [] |
|||
funkelab/motile_tracker | funkelab__motile_tracker-23 | 65bc35cecc8ee9deac66d0d8b330aafd5ffbd561 | diff --git a/src/motile_plugin/widgets/run_editor.py b/src/motile_plugin/widgets/run_editor.py
index ee78f0a..4567c74 100644
--- a/src/motile_plugin/widgets/run_editor.py
+++ b/src/motile_plugin/widgets/run_editor.py
@@ -4,13 +4,12 @@
from typing import TYPE_CHECKING
from warnings import warn
+import magicgui.widgets
+import napari.layers
import numpy as np
-from fonticon_fa6 import FA6S
from motile_plugin.backend.motile_run import MotileRun
-from napari.layers import Labels
from qtpy.QtCore import Signal
from qtpy.QtWidgets import (
- QComboBox,
QGroupBox,
QHBoxLayout,
QLabel,
@@ -20,13 +19,11 @@
QVBoxLayout,
QWidget,
)
-from superqt.fonticon import icon
from .params_editor import SolverParamsEditor
if TYPE_CHECKING:
import napari
- import napari.layers
logger = logging.getLogger(__name__)
@@ -48,7 +45,7 @@ def __init__(self, viewer: napari.Viewer):
self.solver_params_widget = SolverParamsEditor()
self.run_name: QLineEdit
self.refresh_layer_button: QPushButton
- self.layer_selection_box: QComboBox
+ self.layer_selection_box: magicgui.widgets.Widget
main_layout = QVBoxLayout()
main_layout.addWidget(self._run_widget())
@@ -68,59 +65,41 @@ def _labels_layer_widget(self) -> QWidget:
layer_group = QWidget()
layer_layout = QHBoxLayout()
layer_layout.setContentsMargins(0, 0, 0, 0)
- layer_layout.addWidget(QLabel("Input Layer:"))
-
- # Layer selection combo box
- self.layer_selection_box = QComboBox()
- self.update_labels_layers()
- self.layer_selection_box.setToolTip(
+ label = QLabel("Input Layer:")
+ layer_layout.addWidget(label)
+ label.setToolTip(
"Select the labels layer you want to use for tracking"
)
- size_policy = self.layer_selection_box.sizePolicy()
- size_policy.setHorizontalPolicy(QSizePolicy.MinimumExpanding)
- self.layer_selection_box.setSizePolicy(size_policy)
- layer_layout.addWidget(self.layer_selection_box)
- # Refresh button
- self.refresh_layer_button = QPushButton(
- icon=icon(FA6S.arrows_rotate, color="white")
- )
- self.refresh_layer_button.setToolTip(
- "Refresh this selection box with current napari layers"
+ # # Layer selection combo box
+ self.layer_selection_box = magicgui.widgets.create_widget(
+ annotation=napari.layers.Labels
)
- self.refresh_layer_button.clicked.connect(self.update_labels_layers)
- layer_layout.addWidget(self.refresh_layer_button)
+ layers_events = self.viewer.layers.events
+ layers_events.inserted.connect(self.layer_selection_box.reset_choices)
+ layers_events.removed.connect(self.layer_selection_box.reset_choices)
+ layers_events.reordered.connect(self.layer_selection_box.reset_choices)
+
+ qlayer_select = self.layer_selection_box.native
+
+ size_policy = qlayer_select.sizePolicy()
+ size_policy.setHorizontalPolicy(QSizePolicy.MinimumExpanding)
+ qlayer_select.setSizePolicy(size_policy)
+ layer_layout.addWidget(qlayer_select)
layer_group.setLayout(layer_layout)
return layer_group
- def update_labels_layers(self) -> None:
- """Update the layer selection box with the labels layers in the viewer"""
- self.layer_selection_box.clear()
- for layer in self.viewer.layers:
- if isinstance(layer, Labels):
- self.layer_selection_box.addItem(layer.name)
- if len(self.layer_selection_box) == 0:
- self.layer_selection_box.addItem("None")
-
def get_labels_data(self) -> np.ndarray | None:
"""Get the input segmentation given the current selection in the
layer dropdown.
Returns:
np.ndarray | None: The data of the labels layer with the name
- that is selected, or None if the layer name is not present in
- the viewer or is not a labels layer.
+ that is selected, or None if no layer is selected.
"""
- layer_name = self.layer_selection_box.currentText()
- if layer_name == "None" or layer_name not in self.viewer.layers:
- return None
- layer = self.viewer.layers[layer_name]
- if not isinstance(layer, Labels):
- warn(
- f"Layer {layer_name} is not a Labels layer. List refresh needed",
- stacklevel=2,
- )
+ layer = self.layer_selection_box.value
+ if layer is None:
return None
return layer.data
| Use magicgui to get labels layers so that the selection stays mostly synced (no refresh button)
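A minimal sketch of the approach taken in the patch above, shown outside the plugin for illustration; it assumes a running napari viewer, and the variable names are illustrative rather than taken from the plugin:

```
import magicgui.widgets
import napari

viewer = napari.Viewer()

# Let magicgui build a selection widget whose choices are the viewer's Labels layers.
layer_select = magicgui.widgets.create_widget(annotation=napari.layers.Labels)

# Keep the choices in sync with the viewer, so no manual refresh button is needed.
layers_events = viewer.layers.events
layers_events.inserted.connect(layer_select.reset_choices)
layers_events.removed.connect(layer_select.reset_choices)
layers_events.reordered.connect(layer_select.reset_choices)

# The underlying Qt widget is available as layer_select.native for use in a Qt
# layout, and the currently selected layer (or None) is read from layer_select.value.
# Choices populate once the widget is parented inside the napari window, e.g.:
viewer.window.add_dock_widget(layer_select, area="right")
```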
| 2024-05-10T11:05:35 | 0.0 | [] | [] |
|||
1QB-Information-Technologies/ccvm | 1QB-Information-Technologies__ccvm-174 | a170953051be11f6c91eb2f58a207c0a0a402b0c | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4393cb2f..838a170e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,12 +15,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Restructured the metadata object to include a `device` field, a
`result_metadata` list, and a `metadata_dict`.
-### Fixed
+### Fixed
- Fixed issue where `s` was being updated incorrectly on each iteration of `DLSolver._solve()`.
+- Fixed the calculation of `solve_time` and `pp_time` for all solvers to reflect the time
+for a single calculation of an instance intead of the time to solve the full batch
### Added
- Added `PumpedLangevinSolver`, which is an extension of `LangevinSolver` to simulate pumped Langevin dynamics with a demo script in the examples directory.
- Implemented a simple gradient descent post-processing step, as described in the paper, similar to Langevin dynamics but without noise; uses the Euler method with box constraint imposition at each iteration.
+- Added a scaling coefficient for the feedback term in `dl-ccvm` as an input to the solver
### Changed
- Streamlined README by relocating and optimizing architecture diagrams.
@@ -29,6 +32,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Consolidated test organization by centralizing all tests under a unified
`tests` folder, with subdirectories for unit, integration, and test data. This
enhances accessibility and clarity in managing test-related resources.
+- Optimized default parameters of `dl-ccvm`, `mf-ccvm`, and `langevin_solver` to reflect the findings of our latest research
+- Changed the default value of `num_iter_main` in the `grad_descent` post-processor to produce
+better results for the default value.
- Updated the `add_metadata` function in the Metadata class to
`add_to_result_metadata` for clarity.
diff --git a/README.md b/README.md
index c134ee96..aa7c9a5e 100644
--- a/README.md
+++ b/README.md
@@ -122,8 +122,8 @@ print(f"The best known solution to this problem is {solution.optimal_value}")
print(f"The best objective value found by the solver was {solution.best_objective_value}")
# The best objective value found by the solver was 798.1630859375
-print(f"The solving process took {solution.solve_time} seconds")
-# The solving process took 8.949262142181396 seconds
+print(f"The solving process took effectively {solution.solve_time} seconds to solve a single instance")
+# The solving process took 0.008949262142181396 seconds
```
## Documentation
@@ -144,7 +144,7 @@ Thank you for considering making a contribution to our project! We appreciate y
## References
-This repository contains architectures and simulators presented in the paper ["Non-convex Quadratic Programming Using Coherent Optical Networks"](https://arxiv.org/abs/2209.04415) by Farhad Khosravi, Ugur Yildiz, Artur Scherer, and Pooya Ronagh.
+This repository contains architectures and simulators presented in the paper ["Non-convex Quadratic Programming Using Coherent Optical Networks"](https://arxiv.org/abs/2209.04415) by Farhad Khosravi, Ugur Yildiz, Martin Perreault, Artur Scherer, and Pooya Ronagh.
## License
diff --git a/ccvm_simulators/post_processor/grad_descent.py b/ccvm_simulators/post_processor/grad_descent.py
index 4d4193a7..0ca38eb4 100644
--- a/ccvm_simulators/post_processor/grad_descent.py
+++ b/ccvm_simulators/post_processor/grad_descent.py
@@ -17,7 +17,7 @@ def postprocess(
v_vector,
lower_clamp=0.0,
upper_clamp=1.0,
- num_iter_main=100,
+ num_iter_main=1000,
num_iter_pp=None,
step_size=0.1,
):
diff --git a/ccvm_simulators/solution.py b/ccvm_simulators/solution.py
index 8b781889..8917d66d 100644
--- a/ccvm_simulators/solution.py
+++ b/ccvm_simulators/solution.py
@@ -15,7 +15,7 @@ class Solution:
objective_values (torch.Tensor): The objective values of the solutions
found by the solver.
iterations (int): The iteration number for this problem size.
- solve_time (float): Time to solve the problem.
+ solve_time (float): The effective time to solve the problem instance only once.
pp_time (float): Time to post-process the problem.
optimal_value (float): The optimal objective value for the given problem instance.
best_value (float): The best objective value for the given problem instance.
diff --git a/ccvm_simulators/solvers/dl_solver.py b/ccvm_simulators/solvers/dl_solver.py
index 9baf1473..95a7c757 100644
--- a/ccvm_simulators/solvers/dl_solver.py
+++ b/ccvm_simulators/solvers/dl_solver.py
@@ -7,7 +7,7 @@
import torch.distributions as tdist
import time
-DL_SCALING_MULTIPLIER = 0.5
+DL_SCALING_MULTIPLIER = 0.2
"""The value used by the DLSolver when calculating a scaling value in
super.get_scaling_factor()"""
@@ -76,7 +76,9 @@ def parameter_key(self):
@parameter_key.setter
def parameter_key(self, parameters):
- expected_dlparameter_key_set = set(["pump", "dt", "iterations", "noise_ratio"])
+ expected_dlparameter_key_set = set(
+ ["pump", "dt", "iterations", "noise_ratio", "feedback_scale"]
+ )
parameter_key_list = parameters.values()
# Iterate over the parameters for each given problem size
for parameter_key in parameter_key_list:
@@ -93,7 +95,7 @@ def parameter_key(self, parameters):
self._parameter_key = parameters
self._is_tuned = False
- def _calculate_drift_boxqp(self, c, s, pump, rate, S=1):
+ def _calculate_drift_boxqp(self, c, s, pump, rate, feedback_scale=100, S=1):
"""We treat the SDE that simulates the CIM of NTT as drift
calculation.
@@ -122,8 +124,9 @@ def _calculate_drift_boxqp(self, c, s, pump, rate, S=1):
s_grad_2 = torch.einsum("cj,cj -> cj", -1 - (pump * rate) - c_pow - s_pow, s)
s_grad_3 = self.v_vector / 2 / S
- c_drift = -c_grad_1 + c_grad_2 - c_grad_3
- s_drift = -s_grad_1 + s_grad_2 - s_grad_3
+ feedback_scale_dynamic = feedback_scale * (0.5 + rate)
+ c_drift = -feedback_scale_dynamic * (c_grad_1 + c_grad_3) + c_grad_2
+ s_drift = -feedback_scale_dynamic * (s_grad_1 + s_grad_3) + s_grad_2
return c_drift, s_drift
def _calculate_grads_boxqp(self, c, s, S=1):
@@ -238,6 +241,7 @@ def _solve(
dt,
iterations,
noise_ratio,
+ feedback_scale,
pump_rate_flag,
g,
evolution_step_size,
@@ -287,7 +291,9 @@ def _solve(
noise_ratio_i = (noise_ratio - 1) * np.exp(-(i + 1) / iterations * 3) + 1
- c_drift, s_drift = self.calculate_drift(c, s, pump, pump_rate)
+ c_drift, s_drift = self.calculate_drift(
+ c, s, pump, pump_rate, feedback_scale
+ )
wiener_increment_c = (
wiener_dist_c.sample((problem_size,)).transpose(0, 1)
* np.sqrt(dt)
@@ -576,6 +582,7 @@ def __call__(
dt = self.parameter_key[problem_size]["dt"]
iterations = self.parameter_key[problem_size]["iterations"]
noise_ratio = self.parameter_key[problem_size]["noise_ratio"]
+ feedback_scale = self.parameter_key[problem_size]["feedback_scale"]
except KeyError as e:
raise KeyError(
f"The parameter '{e.args[0]}' for the given instance size is not defined."
@@ -639,6 +646,7 @@ def __call__(
dt,
iterations,
noise_ratio,
+ feedback_scale,
pump_rate_flag,
g,
evolution_step_size,
@@ -655,6 +663,7 @@ def __call__(
dt,
iterations,
noise_ratio,
+ feedback_scale,
pump_rate_flag,
g,
evolution_step_size,
@@ -666,8 +675,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer for the solve
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Run the post processor on the results, if specified
if post_processor:
@@ -678,7 +690,8 @@ def __call__(
problem_variables = post_processor_object.postprocess(
self.change_variables(c, S), self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = c
pp_time = 0.0
diff --git a/ccvm_simulators/solvers/langevin_solver.py b/ccvm_simulators/solvers/langevin_solver.py
index 8a9e1b28..dd9a1e8b 100644
--- a/ccvm_simulators/solvers/langevin_solver.py
+++ b/ccvm_simulators/solvers/langevin_solver.py
@@ -7,7 +7,7 @@
import torch.distributions as tdist
import time
-LANGEVIN_SCALING_MULTIPLIER = 0.5
+LANGEVIN_SCALING_MULTIPLIER = 0.05
"""The value used by the LangevinSolver when calculating a scaling value in
super.get_scaling_factor()"""
@@ -534,8 +534,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer for the solve
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Run the post processor on the results, if specified
if post_processor:
@@ -546,7 +549,8 @@ def __call__(
problem_variables = post_processor_object.postprocess(
c, self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = c
pp_time = 0.0
diff --git a/ccvm_simulators/solvers/mf_solver.py b/ccvm_simulators/solvers/mf_solver.py
index 321b8695..2aa4e248 100644
--- a/ccvm_simulators/solvers/mf_solver.py
+++ b/ccvm_simulators/solvers/mf_solver.py
@@ -7,7 +7,7 @@
import torch.distributions as tdist
import time
-MF_SCALING_MULTIPLIER = 0.1
+MF_SCALING_MULTIPLIER = 0.05
"""The value used by the MFSolver when calculating a scaling value in
super.get_scaling_factor()"""
@@ -679,8 +679,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Run the post processor on the results, if specified
if post_processor:
@@ -689,12 +692,10 @@ def __call__(
)
problem_variables = post_processor_object.postprocess(
- self.change_variables(mu_tilde, S),
- self.q_matrix,
- self.v_vector,
- device=device,
+ self.change_variables(mu_tilde, S), self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = self.change_variables(mu_tilde, S)
pp_time = 0.0
diff --git a/ccvm_simulators/solvers/pumped_langevin_solver.py b/ccvm_simulators/solvers/pumped_langevin_solver.py
index dcce5a00..78dd22e6 100644
--- a/ccvm_simulators/solvers/pumped_langevin_solver.py
+++ b/ccvm_simulators/solvers/pumped_langevin_solver.py
@@ -288,9 +288,7 @@ def _solve(
):
# Update the record of the sample values with the values found at
# this iteration
- self.c_sample[
- :, :, samples_taken
- ] = c
+ self.c_sample[:, :, samples_taken] = c
samples_taken += 1
return c
@@ -442,8 +440,8 @@ def __call__(
evolution_file=None,
algorithm_parameters=None,
):
- """Solves the box-constrained programming problem using the pumped Langevin solver using
- either Adam algorithm for the calculation of the gradient of the objective function or
+ """Solves the box-constrained programming problem using the pumped Langevin solver using
+ either Adam algorithm for the calculation of the gradient of the objective function or
the simple gradient descent method. This choice can be set in the argument of `algorithm_parameters`.
Args:
@@ -578,8 +576,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer for the solve
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Calibrate the variable
c_prime = (c + S) / (2 * S)
@@ -593,7 +594,8 @@ def __call__(
problem_variables = post_processor_object.postprocess(
c_prime, self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = c_prime
pp_time = 0.0
diff --git a/examples/ccvm_boxqp_dl.py b/examples/ccvm_boxqp_dl.py
index d582f017..a9387c44 100644
--- a/examples/ccvm_boxqp_dl.py
+++ b/examples/ccvm_boxqp_dl.py
@@ -14,7 +14,13 @@
# Supply solver parameters for different problem sizes
solver.parameter_key = {
- 20: {"pump": 2.0, "dt": 0.005, "iterations": 15000, "noise_ratio": 10},
+ 20: {
+ "pump": 8.0,
+ "feedback_scale": 100,
+ "dt": 0.001,
+ "iterations": 1500,
+ "noise_ratio": 10,
+ },
}
# Load test instances to solve
diff --git a/examples/ccvm_boxqp_mf.py b/examples/ccvm_boxqp_mf.py
index 2c56c699..f8d30ed9 100644
--- a/examples/ccvm_boxqp_mf.py
+++ b/examples/ccvm_boxqp_mf.py
@@ -15,12 +15,12 @@
# Supply solver parameters for different problem sizes
solver.parameter_key = {
20: {
- "pump": 0.5,
- "feedback_scale": 20,
- "j": 20,
- "S": 0.2,
+ "pump": 0.0,
+ "feedback_scale": 4000,
+ "j": 5.0,
+ "S": 20.0,
"dt": 0.0025,
- "iterations": 15000,
+ "iterations": 1500,
}
}
@@ -42,10 +42,10 @@
# (2) algorithm_parameters=AdamParameters(..) for the Adam algorithm
solution = solver(
instance=boxqp_instance,
- post_processor=None,
- algorithm_parameters=AdamParameters(
- alpha=0.001, beta1=0.9, beta2=0.999, add_assign=False
- ),
+ post_processor="grad-descent",
+ # algorithm_parameters=AdamParameters(
+ # alpha=0.001, beta1=0.9, beta2=0.999, add_assign=False
+ # ),
)
print(solution)
diff --git a/examples/langevin_boxqp.py b/examples/langevin_boxqp.py
index a5a7a2eb..d39331cb 100644
--- a/examples/langevin_boxqp.py
+++ b/examples/langevin_boxqp.py
@@ -15,9 +15,9 @@
# Supply solver parameters for different problem sizes
solver.parameter_key = {
20: {
- "dt": 0.005,
- "iterations": 15000,
- "sigma": 0.02,
+ "dt": 0.002,
+ "iterations": 1500,
+ "sigma": 0.5,
"feedback_scale": 1.0,
},
}
@@ -40,7 +40,7 @@
# (2) algorithm_parameters=AdamParameters(..) for the Adam algorithm
solution = solver(
instance=boxqp_instance,
- post_processor=None,
+ post_processor="grad-descent",
# algorithm_parameters=AdamParameters(
# alpha=0.001, beta1=0.9, beta2=0.999, add_assign=False
# ),
diff --git a/examples/pumped_langevin_boxqp.py b/examples/pumped_langevin_boxqp.py
index 87d19b95..9b9cf46c 100644
--- a/examples/pumped_langevin_boxqp.py
+++ b/examples/pumped_langevin_boxqp.py
@@ -19,7 +19,7 @@
20: {
"pump": 2.0, # p0
"dt": 0.002,
- "iterations": 15000,
+ "iterations": 1500,
"sigma": 0.5,
"feedback_scale": 1.0,
},
@@ -43,7 +43,7 @@
# (2) algorithm_parameters=AdamParameters(..) for the Adam algorithm
solution = solver(
instance=boxqp_instance,
- post_processor=None,
+ post_processor="grad-descent",
# algorithm_parameters=AdamParameters(
# alpha=0.001, beta1=0.9, beta2=0.999, add_assign=True
# ),
| Updating the solvers and example files to reproduce the results of the paper
Currently, the solvers are not producing good results. Some of the solver parameters need to be adjusted to reproduce the results of the paper. This also requires some changes to the solver code, such as in `langevin_solver`.
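For reference, a sketch of the kind of parameter update involved, using the DL-CCVM values from the patched `examples/ccvm_boxqp_dl.py` above; `solver` is assumed to be an already-constructed `DLSolver` instance:

```
# Updated DL-CCVM parameters for problem size 20; note the new "feedback_scale"
# key, which the solver's parameter_key setter now expects alongside the old ones.
solver.parameter_key = {
    20: {
        "pump": 8.0,
        "feedback_scale": 100,
        "dt": 0.001,
        "iterations": 1500,
        "noise_ratio": 10,
    },
}
```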
| 2024-02-29T16:48:39 | 0.0 | [] | [] |
|||
IN-CORE/pyincore | IN-CORE__pyincore-101 | ff26791e2cde1ff75f79b72869183f74dc842ac8 | diff --git a/pyincore/analyses/joplincge/equationlib.py b/pyincore/analyses/joplincge/equationlib.py
index bd90a05cf..3ebce8328 100644
--- a/pyincore/analyses/joplincge/equationlib.py
+++ b/pyincore/analyses/joplincge/equationlib.py
@@ -77,7 +77,7 @@ def set_value(self, name, values, target):
if 'nrows' in info and 'ncols' in info:
values = pd.DataFrame(index=info['rows'], columns=info['cols']).fillna(values)
elif 'nrows' in info and 'ncols' not in info:
- values = pd.Series(index=info['rows']).fillna(values)
+ values = pd.Series(index=info['rows'], dtype='float64').fillna(values)
if type(values) == pd.DataFrame:
rows = values.index.tolist()
@@ -193,7 +193,7 @@ def get(self, name, x=None):
for j in info['cols']:
ret.at[i, j] = x[self.get_index(name, row=i, col=j)]
elif 'nrows' in info and 'ncols' not in info:
- ret = pd.Series(index=info['rows']).fillna(0.0)
+ ret = pd.Series(index=info['rows'], dtype='float64').fillna(0.0)
for i in info['rows']:
ret.at[i] = x[self.get_index(name, row=i)]
elif 'nrows' not in info and 'ncols' not in info:
diff --git a/pyincore/analyses/joplincge/joplincge.py b/pyincore/analyses/joplincge/joplincge.py
index 35a2d6022..ae8b50b9e 100644
--- a/pyincore/analyses/joplincge/joplincge.py
+++ b/pyincore/analyses/joplincge/joplincge.py
@@ -398,16 +398,16 @@ def run(self):
ALPHA = pd.DataFrame(index=F, columns=I).fillna(0.0)
B = pd.DataFrame(index=I, columns=IG).fillna(0.0)
B1 = pd.DataFrame(index=I, columns=I).fillna(0.0)
- CMOWAGE = pd.Series(index=CM).fillna(0.0)
- CMIWAGE = pd.Series(index=L).fillna(0.0)
+ CMOWAGE = pd.Series(index=CM, dtype='float64').fillna(0.0)
+ CMIWAGE = pd.Series(index=L, dtype='float64').fillna(0.0)
FCONST = pd.DataFrame(index=F, columns=I).fillna(0.0)
- GAMMA = pd.Series(index=I).fillna(0.0)
- DELTA = pd.Series(index=I).fillna(0.0)
+ GAMMA = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DELTA = pd.Series(index=I, dtype='float64').fillna(0.0)
PIT = pd.DataFrame(index=G, columns=H).fillna(0.0)
- PRIVRET = pd.Series(index=H).fillna(0.0)
- LFOR = pd.Series(index=LA).fillna(0.0)
- KFOR = pd.Series(index=K).fillna(0.0)
- GFOR = pd.Series(index=G).fillna(0.0)
+ PRIVRET = pd.Series(index=H, dtype='float64').fillna(0.0)
+ LFOR = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ KFOR = pd.Series(index=K, dtype='float64').fillna(0.0)
+ GFOR = pd.Series(index=G, dtype='float64').fillna(0.0)
out = pd.DataFrame(index=G, columns=G).fillna(0.0)
TAUFH = pd.DataFrame(index=G, columns=F).fillna(0.0)
TAUFL = pd.DataFrame(index=G, columns=L).fillna(0.0)
@@ -423,39 +423,39 @@ def run(self):
TAUX = pd.DataFrame(index=G, columns=IG).fillna(0.0)
TAUG = pd.DataFrame(index=G, columns=I).fillna(0.0)
TAXS = pd.DataFrame(index=G, columns=G).fillna(0.0)
- TAXS1 = pd.Series(index=GNL).fillna(0.0)
+ TAXS1 = pd.Series(index=GNL, dtype='float64').fillna(0.0)
# ELASTICITIES AND TAX DATA IMPOSED
BETA = pd.DataFrame(index=I, columns=H).fillna(0.0)
- ETAD = pd.Series(index=I).fillna(0.0)
- ETAE = pd.Series(index=I).fillna(0.0)
- ETAI = pd.Series(index=IG).fillna(0.0)
+ ETAD = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAE = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAI = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETAIX = pd.DataFrame(index=K, columns=IG).fillna(0.0)
ETAL = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- ETAL1 = pd.Series(index=IG).fillna(0.0)
- ETALB1 = pd.Series(index=IG).fillna(0.0)
+ ETAL1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ ETALB1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETALB = pd.DataFrame(index=L, columns=IG).fillna(0.0)
- ETAM = pd.Series(index=I).fillna(0.0)
- ETAYDO = pd.Series(index=H).fillna(0.0)
- ETAYDI = pd.Series(index=H).fillna(0.0)
- ETAUO = pd.Series(index=H).fillna(0.0)
- ETAUI = pd.Series(index=H).fillna(0.0)
- ETARA = pd.Series(index=H).fillna(0.0)
- ETAPT = pd.Series(index=H).fillna(0.0)
- ETAPIT = pd.Series(index=H).fillna(0.0)
-
- EXWGEO = pd.Series(index=CM).fillna(0.0)
- EXWGEI = pd.Series(index=L).fillna(0.0)
- ECOMI = pd.Series(index=L).fillna(0.0)
- ECOMO = pd.Series(index=CM).fillna(0.0)
+ ETAM = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAYDO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAYDI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETARA = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPT = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPIT = pd.Series(index=H, dtype='float64').fillna(0.0)
+
+ EXWGEO = pd.Series(index=CM, dtype='float64').fillna(0.0)
+ EXWGEI = pd.Series(index=L, dtype='float64').fillna(0.0)
+ ECOMI = pd.Series(index=L, dtype='float64').fillna(0.0)
+ ECOMO = pd.Series(index=CM, dtype='float64').fillna(0.0)
JOBCOR = pd.DataFrame(index=H, columns=L).fillna(0.0)
OUTCOR = pd.DataFrame(index=H, columns=CM).fillna(0.0)
LAMBDA = pd.DataFrame(index=I, columns=I).fillna(0.0)
- NRPG = pd.Series(index=H).fillna(0.0)
- depr = pd.Series(index=IG).fillna(0.1)
+ NRPG = pd.Series(index=H, dtype='float64').fillna(0.0)
+ depr = pd.Series(index=IG, dtype='float64').fillna(0.1)
# ARRAYS BUILT TO EXPORT RESULTS TO SEPARATE FILE
@@ -468,60 +468,60 @@ def run(self):
CG0T = pd.DataFrame(index=I, columns=G).fillna(0.0)
CH0 = pd.DataFrame(index=I, columns=H).fillna(0.0)
CH0T = pd.DataFrame(index=I, columns=H).fillna(0.0)
- CMI0 = pd.Series(index=L).fillna(0.0)
- CMO0 = pd.Series(index=CM).fillna(0.0)
- CN0 = pd.Series(index=I).fillna(0.0)
- CN0T = pd.Series(index=I).fillna(0.0)
- CPI0 = pd.Series(index=H).fillna(0.0)
- CPIN0 = pd.Series(index=H).fillna(0.0)
- CPIH0 = pd.Series(index=H).fillna(0.0)
- CX0 = pd.Series(index=I).fillna(0.0)
- D0 = pd.Series(index=I).fillna(0.0)
- DD0 = pd.Series(index=Z).fillna(0.0)
- DS0 = pd.Series(index=Z).fillna(0.0)
- mine = pd.Series(index=Z).fillna(0.0)
- DQ0 = pd.Series(index=Z).fillna(0.0)
+ CMI0 = pd.Series(index=L, dtype='float64').fillna(0.0)
+ CMO0 = pd.Series(index=CM, dtype='float64').fillna(0.0)
+ CN0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CN0T = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CPI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CX0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ D0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DD0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DS0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ mine = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DQ0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
FD0 = pd.DataFrame(index=F, columns=Z).fillna(0.0)
IGT0 = pd.DataFrame(index=G, columns=G).fillna(0.0)
KS0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
LAS0 = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- HH0 = pd.Series(index=H).fillna(0.0)
- HN0 = pd.Series(index=H).fillna(0.0)
- HW0 = pd.Series(index=H).fillna(0.0)
- M0 = pd.Series(index=I).fillna(0.0)
- M01 = pd.Series(index=Z).fillna(0.0)
- MI0 = pd.Series(index=H).fillna(0.0)
- MO0 = pd.Series(index=H).fillna(0.0)
+ HH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HW0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ M0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ M01 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ MI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ MO0 = pd.Series(index=H, dtype='float64').fillna(0.0)
N0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
# NKIO
- KPFOR01 = pd.Series(index=K).fillna(0.0)
- KPFOR0 = pd.Series(index=K).fillna(0.0)
- LNFOR0 = pd.Series(index=LA).fillna(0.0)
- LNFOR01 = pd.Series(index=LA).fillna(0.0)
- GVFOR0 = pd.Series(index=G).fillna(0.0)
- P0 = pd.Series(index=IG).fillna(0.0)
- PD0 = pd.Series(index=I).fillna(0.0)
- PVA0 = pd.Series(index=I).fillna(0.0)
- PWM0 = pd.Series(index=I).fillna(0.0)
- Q0 = pd.Series(index=Z).fillna(0.0)
- Q10 = pd.Series(index=Z).fillna(0.0)
+ KPFOR01 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ KPFOR0 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ LNFOR0 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ LNFOR01 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ GVFOR0 = pd.Series(index=G, dtype='float64').fillna(0.0)
+ P0 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ PD0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PVA0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PWM0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ Q0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ Q10 = pd.Series(index=Z, dtype='float64').fillna(0.0)
R0 = pd.DataFrame(index=F, columns=Z).fillna(1.0)
- RA0 = pd.Series(index=F).fillna(0.0)
- S0 = pd.Series(index=Z).fillna(0.0)
+ RA0 = pd.Series(index=F, dtype='float64').fillna(0.0)
+ S0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
# SPIO
- V0 = pd.Series(index=I).fillna(0.0)
- V0T = pd.Series(index=I).fillna(0.0)
+ V0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ V0T = pd.Series(index=I, dtype='float64').fillna(0.0)
TP = pd.DataFrame(index=H, columns=G).fillna(0.0)
# TAUF0 = Table(G,F,Z)
- YD0 = pd.Series(index=H).fillna(0.0)
- Y0 = pd.Series(index=Z).fillna(0.0)
+ YD0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ Y0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
# -------------------------------------------------------------------------------------------------------------
# CALCULATIONS OF PARAMETERS AND INITIAL VALUES
@@ -540,7 +540,6 @@ def run(self):
BETA = pd.concat([MISC.loc[I, ['ETAY']]] * len(H), axis=1)
BETA.columns = H
- # LAMBDA = pd.concat([MISC.loc[I, ['ETAOP']]]*len(I), axis=1)
LAMBDA.columns = I
for i in I:
LAMBDA.loc[[i], [i]] = MISC.at[i, 'ETAOP']
@@ -785,8 +784,12 @@ def run(self):
# RA0.loc[F] = 1.0
# create data frame for factor taxes by sector
- a = pd.Series(index=I).fillna(0.0)
- a = SAM.loc[USSOCL, I].append(a, ignore_index=True).append(SAM.loc[GL, I]) # labor, land, capital
+ a = SAM.loc[USSOCL, I].reset_index(drop=True)
+ # add a row with zeros
+ a.loc[len(a)] = [0.0] * len(I)
+ # add a row with PTAXJop data
+ a = pd.concat([a, SAM.loc[GL, I]]) # labor, land, capital
+
a.index = F
ALPHA.loc[F, I] = (SAM.loc[F, I] + a.loc[F, I]) / (SAM.loc[F, I].sum(0) + SAM.loc[GF, I].sum(0))
@@ -1819,15 +1822,17 @@ def run_solver(cons_filename, temp_file_name):
households = ["HH1", "HH2", "HH3", "HH4", "HH5"]
labor_groups = ["L1", "L2", "L3", "L4", "L5"]
sectors = ["Goods", "Trades", "Others", "HS1", "HS2", "HS3"]
+ # note TRADE vs TRADES, OTHERS vs OTHER in capitalized sectors
+ sectors_cap = ["GOODS", "TRADE", "OTHER", "HS1", "HS2", "HS3"]
FD0.insert(loc=0, column="Labor Group", value=labor_groups)
FDL.insert(loc=0, column="Labor Group", value=labor_groups)
- gross_income = {"Household Group": households, "Y0": Y0.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index(),
- "YL": YL.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index()}
- hh = {"Household Group": households[:5], "HH0": HH0.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index(),
- "HHL": HHL.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index()}
- ds = {"Sectors": sectors, "DS0": DS0.loc[{"GOODS", "TRADE", "OTHER", "HS1", "HS2", "HS3"}].sort_index(),
- "DSL": vars.get('DS', result[-1]).loc[{"GOODS", "TRADE", "OTHER", "HS1", "HS2", "HS3"}].sort_index()}
+ gross_income = {"Household Group": households, "Y0": Y0.loc[households].sort_index(),
+ "YL": YL.loc[households].sort_index()}
+ hh = {"Household Group": households[:5], "HH0": HH0.loc[households].sort_index(),
+ "HHL": HHL.loc[households].sort_index()}
+ ds = {"Sectors": sectors, "DS0": DS0.loc[sectors_cap].sort_index(),
+ "DSL": vars.get('DS', result[-1]).loc[sectors_cap].sort_index()}
self.set_result_csv_data("domestic-supply", pd.DataFrame(ds), name="domestic-supply",
source="dataframe")
diff --git a/pyincore/analyses/seasidecge/equationlib.py b/pyincore/analyses/seasidecge/equationlib.py
index 8e013f678..bd1ab0775 100644
--- a/pyincore/analyses/seasidecge/equationlib.py
+++ b/pyincore/analyses/seasidecge/equationlib.py
@@ -84,7 +84,7 @@ def set_value(self, name, values, target):
if 'nrows' in info and 'ncols' in info:
values = pd.DataFrame(index=info['rows'], columns=info['cols']).fillna(values)
elif 'nrows' in info and 'ncols' not in info:
- values = pd.Series(index=info['rows']).fillna(values)
+ values = pd.Series(index=info['rows'], dtype='float64').fillna(values)
if type(values) == pd.DataFrame:
rows = values.index.tolist()
@@ -197,7 +197,7 @@ def get(self, name, x=None):
for j in info['cols']:
ret.at[i, j] = x[self.getIndex(name, row=i, col=j)]
elif 'nrows' in info and 'ncols' not in info:
- ret = pd.Series(index=info['rows']).fillna(0.0)
+ ret = pd.Series(index=info['rows'], dtype='float64').fillna(0.0)
for i in info['rows']:
ret.at[i] = x[self.getIndex(name, row=i)]
elif 'nrows' not in info and 'ncols' not in info:
diff --git a/pyincore/analyses/seasidecge/seasidecge.py b/pyincore/analyses/seasidecge/seasidecge.py
index 15829e414..e1aea0cd5 100644
--- a/pyincore/analyses/seasidecge/seasidecge.py
+++ b/pyincore/analyses/seasidecge/seasidecge.py
@@ -392,20 +392,20 @@ def _(x):
AD = pd.DataFrame(index=Z, columns=Z).fillna(0.0)
AG = pd.DataFrame(index=Z, columns=G).fillna(0.0)
AGFS = pd.DataFrame(index=Z, columns=G).fillna(0.0)
- SIGMA = pd.Series(index=I).fillna(0.0)
+ SIGMA = pd.Series(index=I, dtype='float64').fillna(0.0)
ALPHA = pd.DataFrame(index=F, columns=I).fillna(0.0)
ALPHA1 = pd.DataFrame(index=F, columns=I).fillna(0.0)
B = pd.DataFrame(index=I, columns=IG).fillna(0.0)
B1 = pd.DataFrame(index=I, columns=I).fillna(0.0)
- CMIWAGE = pd.Series(index=L).fillna(0.0)
+ CMIWAGE = pd.Series(index=L, dtype='float64').fillna(0.0)
FCONST = pd.DataFrame(index=F, columns=I).fillna(0.0)
- GAMMA = pd.Series(index=I).fillna(0.0)
- DELTA = pd.Series(index=I).fillna(0.0)
+ GAMMA = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DELTA = pd.Series(index=I, dtype='float64').fillna(0.0)
PIT = pd.DataFrame(index=G, columns=H).fillna(0.0)
- PRIVRET = pd.Series(index=H).fillna(0.0)
- LFOR = pd.Series(index=LA).fillna(0.0)
- KFOR = pd.Series(index=K).fillna(0.0)
- GFOR = pd.Series(index=G).fillna(0.0)
+ PRIVRET = pd.Series(index=H, dtype='float64').fillna(0.0)
+ LFOR = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ KFOR = pd.Series(index=K, dtype='float64').fillna(0.0)
+ GFOR = pd.Series(index=G, dtype='float64').fillna(0.0)
out = pd.DataFrame(index=G, columns=G).fillna(0.0)
TAUFH = pd.DataFrame(index=G, columns=F).fillna(0.0)
TAUFL = pd.DataFrame(index=G, columns=L).fillna(0.0)
@@ -421,43 +421,43 @@ def _(x):
TAUX = pd.DataFrame(index=G, columns=IG).fillna(0.0)
TAUG = pd.DataFrame(index=G, columns=I).fillna(0.0)
TAXS = pd.DataFrame(index=G, columns=G).fillna(0.0)
- TAXS1 = pd.Series(index=GNL).fillna(0.0)
+ TAXS1 = pd.Series(index=GNL, dtype='float64').fillna(0.0)
# ELASTICITIES AND TAX DATA IMPOSED
BETA = pd.DataFrame(index=I, columns=H).fillna(0.0)
BETAH = pd.DataFrame(index=HD, columns=H).fillna(0.0)
- ETAD = pd.Series(index=I).fillna(0.0)
- ETAE = pd.Series(index=I).fillna(0.0)
- ETAI = pd.Series(index=IG).fillna(0.0)
+ ETAD = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAE = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAI = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETAIX = pd.DataFrame(index=K, columns=IG).fillna(0.0)
ETAL = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- ETAL1 = pd.Series(index=IG).fillna(0.0)
- ETALB1 = pd.Series(index=IG).fillna(0.0)
+ ETAL1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ ETALB1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETALB = pd.DataFrame(index=L, columns=IG).fillna(0.0)
- ETAM = pd.Series(index=I).fillna(0.0)
- ETAYD = pd.Series(index=H).fillna(0.0)
- ETAYDO = pd.Series(index=H).fillna(0.0)
- ETAYDI = pd.Series(index=H).fillna(0.0)
- ETAU = pd.Series(index=H).fillna(0.0)
- ETAUO = pd.Series(index=H).fillna(0.0)
- ETAUI = pd.Series(index=H).fillna(0.0)
- ETARA = pd.Series(index=H).fillna(0.0)
- ETAPT = pd.Series(index=H).fillna(0.0)
- ETAPIT = pd.Series(index=H).fillna(0.0)
-
- EXWGEI = pd.Series(index=L).fillna(0.0)
- ECOMI = pd.Series(index=L).fillna(0.0)
+ ETAM = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAYD = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAYDO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAYDI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAU = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETARA = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPT = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPIT = pd.Series(index=H, dtype='float64').fillna(0.0)
+
+ EXWGEI = pd.Series(index=L, dtype='float64').fillna(0.0)
+ ECOMI = pd.Series(index=L, dtype='float64').fillna(0.0)
JOBCOR = pd.DataFrame(index=H, columns=L).fillna(0.0)
LAMBDA = pd.DataFrame(index=I, columns=I).fillna(0.0)
LAMBDAH = pd.DataFrame(index=HD, columns=HD1).fillna(0.0)
- NRPG = pd.Series(index=H).fillna(0.0)
- depr = pd.Series(index=IG).fillna(0.1)
+ NRPG = pd.Series(index=H, dtype='float64').fillna(0.0)
+ depr = pd.Series(index=IG, dtype='float64').fillna(0.1)
- RHO = pd.Series(index=I).fillna(0.0)
+ RHO = pd.Series(index=I, dtype='float64').fillna(0.0)
TT = pd.DataFrame(index=F, columns=IG).fillna(0.0)
# ARRAYS BUILT TO EXPORT RESULTS TO SEPARATE FILE
@@ -471,60 +471,60 @@ def _(x):
CG0T = pd.DataFrame(index=I, columns=G).fillna(0.0)
CH0 = pd.DataFrame(index=I, columns=H).fillna(0.0)
CH0T = pd.DataFrame(index=I, columns=H).fillna(0.0)
- CMI0 = pd.Series(index=L).fillna(0.0)
- CN0 = pd.Series(index=I).fillna(0.0)
- CN0T = pd.Series(index=I).fillna(0.0)
- CPI0 = pd.Series(index=H).fillna(0.0)
- CPIN0 = pd.Series(index=H).fillna(0.0)
- CPIH0 = pd.Series(index=H).fillna(0.0)
- CX0 = pd.Series(index=I).fillna(0.0)
- D0 = pd.Series(index=I).fillna(0.0)
- DD0 = pd.Series(index=Z).fillna(0.0)
- DS0 = pd.Series(index=Z).fillna(0.0)
- DQ0 = pd.Series(index=Z).fillna(0.0)
+ CMI0 = pd.Series(index=L, dtype='float64').fillna(0.0)
+ CN0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CN0T = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CPI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CX0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ D0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DD0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DS0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DQ0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
FD0 = pd.DataFrame(index=F, columns=Z).fillna(0.0)
IGT0 = pd.DataFrame(index=G, columns=GX).fillna(0.0)
KS0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
LAS0 = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- HH0 = pd.Series(index=H).fillna(0.0)
- HN0 = pd.Series(index=H).fillna(0.0)
- HW0 = pd.Series(index=H).fillna(0.0)
- M0 = pd.Series(index=I).fillna(0.0)
- M01 = pd.Series(index=Z).fillna(0.0)
- MI0 = pd.Series(index=H).fillna(0.0)
- MO0 = pd.Series(index=H).fillna(0.0)
+ HH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HW0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ M0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ M01 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ MI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ MO0 = pd.Series(index=H, dtype='float64').fillna(0.0)
N0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
# NKIO
- KPFOR01 = pd.Series(index=K).fillna(0.0)
- KPFOR0 = pd.Series(index=K).fillna(0.0)
- LNFOR0 = pd.Series(index=LA).fillna(0.0)
- LNFOR01 = pd.Series(index=LA).fillna(0.0)
- GVFOR0 = pd.Series(index=G).fillna(0.0)
- P0 = pd.Series(index=IG).fillna(0.0)
- PH0 = pd.Series(index=HD).fillna(0.0)
- PD0 = pd.Series(index=I).fillna(0.0)
- PVA0 = pd.Series(index=I).fillna(0.0)
- PWM0 = pd.Series(index=I).fillna(0.0)
- PW0 = pd.Series(index=I).fillna(0.0)
- Q0 = pd.Series(index=Z).fillna(0.0)
- Q10 = pd.Series(index=Z).fillna(0.0)
+ KPFOR01 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ KPFOR0 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ LNFOR0 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ LNFOR01 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ GVFOR0 = pd.Series(index=G, dtype='float64').fillna(0.0)
+ P0 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ PH0 = pd.Series(index=HD, dtype='float64').fillna(0.0)
+ PD0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PVA0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PWM0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PW0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ Q0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ Q10 = pd.Series(index=Z, dtype='float64').fillna(0.0)
R0 = pd.DataFrame(index=F, columns=Z).fillna(1.0)
- RA0 = pd.Series(index=F).fillna(0.0)
- S0 = pd.Series(index=Z).fillna(0.0)
+ RA0 = pd.Series(index=F, dtype='float64').fillna(0.0)
+ S0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
# SPIO
- V0 = pd.Series(index=I).fillna(0.0)
- V0T = pd.Series(index=I).fillna(0.0)
+ V0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ V0T = pd.Series(index=I, dtype='float64').fillna(0.0)
TP = pd.DataFrame(index=H, columns=G).fillna(0.0)
# TAUF0 = Table(G,F,Z)
- YD0 = pd.Series(index=H).fillna(0.0)
- Y0 = pd.Series(index=Z).fillna(0.0)
+ YD0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ Y0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
for label in G1:
out.loc[label, label] = 0
@@ -870,13 +870,15 @@ def _(x):
PVA0.loc[I] = PD0.loc[I] - (
AD.loc[I, I].mul(P0.loc[I], axis='index').mul(1.0 + TAUQ.loc[GS, I].sum(0).T, axis='index').sum(0).T)
- RHO.loc[I] = (1 - SIGMA.loc[I]) / SIGMA.loc[I];
+ RHO.loc[I] = (1 - SIGMA.loc[I]) / SIGMA.loc[I]
# RA0.loc[F] = 1.0
# create data frame for factor taxes by sector
-
- a = pd.Series(index=I).fillna(0.0)
- a = SAM.loc[USSOCL, I].append(a, ignore_index=True).append(SAM.loc[GL, I]) # labor, land, capital
+ a = SAM.loc[USSOCL, I].reset_index(drop=True)
+ # add a row with zeros
+ a.loc[len(a)] = [0.0] * len(I)
+ # add a row with PROPTX data
+ a = pd.concat([a, SAM.loc[GL, I]]) # labor, land, capital
a.index = F
ALPHA.loc[F, I] = (SAM.loc[F, I] + a.loc[F, I]) / (SAM.loc[F, I].sum(0) + SAM.loc[GF, I].sum(0))
| Pandas warning - default dtype for empty series will be 'object' instead of 'float64' in a future version
joplin_cge.ipynb and seaside_cge.ipynb under incore-docs/notebooks show a warning from pandas:
> FutureWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.
A full dump of the warning messages is attached:
[pandas-warning.log](https://github.com/IN-CORE/pyincore/files/8044002/pandas-warning.log)
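The fix applied throughout the patch is to pass an explicit dtype when constructing empty Series; a minimal illustration (the index labels here are made up):

```
import pandas as pd

H = ["HH1", "HH2", "HH3"]

# Triggers the FutureWarning on affected pandas versions: no data and no dtype given.
s_old = pd.Series(index=H).fillna(0.0)

# Passing dtype explicitly silences the warning and keeps the float64 behavior.
s_new = pd.Series(index=H, dtype='float64').fillna(0.0)
```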
| 2022-02-16T17:16:43 | 0.0 | [] | [] |
|||
redhat-cip/hardware | redhat-cip__hardware-196 | d9f97ce64a9df1be207613b311e4d1dff98120ac | diff --git a/hardware/areca.py b/hardware/areca.py
index f61654c1..55b74d94 100644
--- a/hardware/areca.py
+++ b/hardware/areca.py
@@ -19,6 +19,9 @@
from subprocess import Popen
import sys
+from hardware import detect_utils
+
+
SEP_REGEXP = re.compile(r"\s*:\s*")
@@ -147,6 +150,13 @@ def _disable_password():
def detect():
"""Detect Areca controller configuration."""
+ if not detect_utils.which('cli64'):
+ sys.stderr.write('Cannot find cli64 binary\n')
+ return []
+ return detect_areca()
+
+
+def detect_areca():
hwlist = []
device = _sys_info()
if not device:
@@ -193,6 +203,4 @@ def detect():
if len(hwlist):
return hwlist
- # If we dont't detect any areca controller, return None
- # This avoid having empty lists
- return None
+ return []
| Improving the output...
Meanwhile, I can run `apt install python3-hardware` and then `hardware-detect --human` but the output
comes with a few error messages that could easily be caught:
```
/bin/sh: 1: cli64: not found
Info: No Areca controller found
Cannot find megacli on the system
read_smart: Reading S.M.A.R.T information on /dev/sdb
read_smart: Reading S.M.A.R.T information on /dev/sdb with -d ata
read_smart: no device /dev/sdb
read_smart: Reading S.M.A.R.T information on /dev/sda
read_smart: Reading S.M.A.R.T information on /dev/sda with -d ata
read_smart: no device /dev/sda
modprobe: FATAL: Module ipmi_smb not found in directory /lib/modules/6.1.0-5-amd64
Info: Probing ipmi_si failed
Info: Probing ipmi_devintf failed
IANA PEN registry open failed: No such file or directory
Info: No Infiniband device found
IANA PEN registry open failed: No such file or directory
/bin/sh: 1: Syntax error: end of file unexpected
Unable to run hp-conrep:
[('hpa', 'slots', 'count', '2'),
```
The ones with cli64: not found, the Syntax error, and Unable to run hp-conrep look easy to catch...
| 2023-04-03T15:39:37 | 0.0 | [] | [] |
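The same guard can be expressed with the standard library alone; a hedged sketch in which shutil.which stands in for the project's detect_utils.which helper and the probing logic is only a placeholder:
```python
import shutil
import sys


def detect():
    """Detect Areca controllers, bailing out early when the CLI is absent."""
    # shutil.which returns None when the binary is not on PATH, so the noisy
    # "/bin/sh: 1: cli64: not found" shell message never gets a chance to appear.
    if shutil.which("cli64") is None:
        sys.stderr.write("Cannot find cli64 binary\n")
        return []
    return detect_areca()


def detect_areca():
    # Placeholder for the real probing logic; returning a list (never None)
    # lets callers iterate over the result without special cases.
    return []


if __name__ == "__main__":
    print(detect())
```
Returning an empty list instead of None mirrors the change at the bottom of the patch and keeps the caller's aggregation code simple.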
|||
PaddlePaddle/PARL | PaddlePaddle__PARL-1076 | 2d48f6ced3ded581732bbe39152bf3934eac782f | diff --git a/parl/utils/utils.py b/parl/utils/utils.py
index 3d169d1ef..991b7ad5f 100644
--- a/parl/utils/utils.py
+++ b/parl/utils/utils.py
@@ -64,6 +64,32 @@ def get_fluid_version():
_HAS_PADDLE = False
_HAS_TORCH = False
+def check_installed_framework_in_windows():
+ global _HAS_FLUID, _HAS_PADDLE, _HAS_TORCH
+ # paddle & fluid
+ try:
+ _HAS_FLUID = False
+ _HAS_PADDLE = False
+ import paddle
+ from paddle import fluid
+
+ paddle_version = get_fluid_version()
+ logger.info("paddlepaddle version: {}.".format(paddle.__version__))
+ if paddle_version < 200 and paddle_version != 0:
+ assert paddle_version >= 185, "PARL requires paddle >= 1.8.5 and paddle < 2.0.0"
+ _HAS_FLUID = True
+ else:
+ _HAS_PADDLE = True
+ except ImportError as e:
+ _HAS_FLUID = False
+ _HAS_PADDLE = False
+ # torch
+ try:
+ import torch
+ _HAS_TORCH = True
+ except ImportError:
+ _HAS_TORCH = False
+
def check_installed_framework():
def check(installed_framework):
try:
@@ -101,11 +127,15 @@ def check(installed_framework):
_HAS_TORCH = installed_framework['_HAS_TORCH']
del manager, installed_framework
-check_installed_framework()
_IS_WINDOWS = (sys.platform == 'win32')
_IS_MAC = (sys.platform == 'darwin')
+if _IS_WINDOWS:
+ check_installed_framework_in_windows()
+else:
+ check_installed_framework()
+
def kill_process(regex_pattern):
"""kill process whose execution commnad is matched by regex pattern
| pickle error: unable to import parl
When importing parl, an AttributeError is raised: Can't pickle local object 'check_installed_framework.<locals>.check', coming from the file parl\utils\utils.py in <module>.
| Thanks for the feedback. Could you share your Python version, operating system, and PARL version?
I am using Python 3.9.13, the OS is Windows 11, and the PARL version is 2.2.
OK, we will confirm and fix the problem today; in the meantime you can use parl 2.1.
OK, thanks.
OK. May I also ask how to install parl 2.1?
pip install parl==2.1
or
python -m pip install parl==2.1
I have installed it. Now, when using 2.1, I get 'Agent' object has no attribute 'fluid_executor'. Has fluid_executor been deprecated, and what should I use instead?
Yes, fluid_executor has been deprecated; it is an interface from the static-graph era. PARL moved fully to dynamic graphs starting with 2.0. If you want to keep using fluid_executor, you can install version 1.4 or below.
I cannot install version 1.4 or below; a plain pip install parl==1.4 fails. Is there a way around this?
That is because Python 3.9 did not exist yet when PARL 1.4 was released, so you cannot find 1.4 in a py39 environment. In that case you would have to downgrade Python; we recommend using Anaconda to set up environments with different Python versions.
Are there any up-to-date official docs or video tutorials for learning PARL, or should I just adapt my code myself? Also, which Python version does 1.4 need?
Teacher Ke's tutorial is from three years ago; for up-to-date material we recommend reading the docs plus the quick_start examples to get familiar with dynamic-graph PARL.
Chinese docs: https://github.com/PaddlePaddle/PARL/blob/develop/docs/zh_CN/Overview.md
English docs: https://parl.readthedocs.io/en/latest/index.html
Also, PARL 1.4 requires Python 3.7.
Got it, thanks. | 2023-03-13T03:25:24 | 0.0 | [] | [] |
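The underlying cause is that only module-level callables can be pickled, and the spawn start method (the only multiprocessing start method on Windows) relies on pickling; the patch therefore adds a module-level code path for Windows. A small, self-contained demonstration of the difference, with trivial stand-in bodies rather than the real PARL logic:
```python
import pickle


def module_level_check():
    # Defined at module scope: pickle can refer to it by name, so it is
    # safe to hand to multiprocessing under the spawn start method.
    return True


def make_local_check():
    def check():
        # Defined inside another function: pickle has no importable name
        # for it, which is what the reported traceback complains about.
        return True
    return check


pickle.dumps(module_level_check)        # works

try:
    pickle.dumps(make_local_check())
except (AttributeError, pickle.PicklingError) as exc:
    print(exc)   # e.g. Can't pickle local object 'make_local_check.<locals>.check'
```
This is why the fix avoids the nested helper on Windows instead of trying to make it picklable.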
||
MannLabs/alphadia | MannLabs__alphadia-382 | bd9d1857781e9342acd0b1ba9b5a037ee5a53cc4 | diff --git a/alphadia/outputaccumulator.py b/alphadia/outputaccumulator.py
index 16b8b808..9f1e8547 100644
--- a/alphadia/outputaccumulator.py
+++ b/alphadia/outputaccumulator.py
@@ -78,7 +78,8 @@ def _calculate_fragment_position(self):
def parse_output_folder(
self,
folder: str,
- selected_precursor_columns: list[str] | None = None,
+ mandatory_precursor_columns: list[str] | None = None,
+ optional_precursor_columns: list[str] | None = None,
) -> tuple[pd.DataFrame, pd.DataFrame]:
"""
Parse the output folder to get a precursor and fragment dataframe in the flat format.
@@ -87,7 +88,7 @@ def parse_output_folder(
----------
folder : str
The output folder to be parsed.
- selected_precursor_columns : list, optional
+ mandatory_precursor_columns : list, optional
The columns to be selected from the precursor dataframe, by default ['precursor_idx', 'sequence', 'flat_frag_start_idx', 'flat_frag_stop_idx', 'charge', 'rt_library', 'mobility_library', 'mz_library', 'proteins', 'genes', 'mods', 'mod_sites', 'proba']
Returns
@@ -99,8 +100,8 @@ def parse_output_folder(
"""
- if selected_precursor_columns is None:
- selected_precursor_columns = [
+ if mandatory_precursor_columns is None:
+ mandatory_precursor_columns = [
"precursor_idx",
"sequence",
"flat_frag_start_idx",
@@ -108,12 +109,10 @@ def parse_output_folder(
"charge",
"rt_library",
"rt_observed",
- "rt_calibrated",
"mobility_library",
"mobility_observed",
"mz_library",
"mz_observed",
- "mz_calibrated",
"proteins",
"genes",
"mods",
@@ -121,16 +120,28 @@ def parse_output_folder(
"proba",
"decoy",
]
+
+ if optional_precursor_columns is None:
+ optional_precursor_columns = [
+ "rt_calibrated",
+ "mz_calibrated",
+ ]
+
psm_df = pd.read_parquet(os.path.join(folder, "psm.parquet"))
frag_df = pd.read_parquet(os.path.join(folder, "frag.parquet"))
- assert set(
- selected_precursor_columns
- ).issubset(
- psm_df.columns
- ), f"selected_precursor_columns must be a subset of psm_df.columns didnt find {set(selected_precursor_columns) - set(psm_df.columns)}"
- psm_df = psm_df[selected_precursor_columns]
- # validate.precursors_flat_from_output(psm_df)
+ if not set(mandatory_precursor_columns).issubset(psm_df.columns):
+ raise ValueError(
+ f"mandatory_precursor_columns must be a subset of psm_df.columns didnt find {set(mandatory_precursor_columns) - set(psm_df.columns)}"
+ )
+
+ available_columns = sorted(
+ list(
+ set(mandatory_precursor_columns)
+ | (set(optional_precursor_columns) & set(psm_df.columns))
+ )
+ )
+ psm_df = psm_df[available_columns]
# get foldername of the output folder
foldername = os.path.basename(folder)
@@ -260,9 +271,6 @@ def __init__(self, folders: list, number_of_processes: int):
self._lock = threading.Lock() # Lock to prevent two processes trying to update the same subscriber at the same time
def subscribe(self, subscriber: BaseAccumulator):
- assert isinstance(
- subscriber, BaseAccumulator
- ), f"subscriber must be an instance of BaseAccumulator, got {type(subscriber)}"
self._subscribers.append(subscriber)
def _update_subscriber(
@@ -420,14 +428,21 @@ def post_process(self):
Post process the consensus_speclibase by normalizing retention times.
"""
+ norm_delta_max = self._norm_delta_max
+ if "rt_calibrated" not in self.consensus_speclibase.precursor_df.columns:
+ logger.warning(
+ "rt_calibrated not found in the precursor_df, delta-max normalization will not be performed"
+ )
+ norm_delta_max = False
+
+ logger.info("Performing quality control for transfer learning.")
+ logger.info(f"Normalize by delta: {norm_delta_max}")
logger.info(
- "Performing quality control for transfer learning."
- + f"Normalize by delta: {self._norm_delta_max}"
- + f"Precursor correlation cutoff: {self._precursor_correlation_cutoff}"
- + f"Fragment correlation cutoff: {self._fragment_correlation_ratio}"
+ f"Precursor correlation cutoff: {self._precursor_correlation_cutoff}"
)
+ logger.info(f"Fragment correlation cutoff: {self._fragment_correlation_ratio}")
- if self._norm_delta_max:
+ if norm_delta_max:
self.consensus_speclibase = normalize_rt_delta_max(
self.consensus_speclibase
)
@@ -563,13 +578,20 @@ def ms2_quality_control(
# calculate the median correlation for the precursor
intensity_mask = flat_intensity > 0.0
- median_correlation = np.median(flat_correlation[intensity_mask])
+ median_correlation = (
+ np.median(flat_correlation[intensity_mask]) if intensity_mask.any() else 0.0
+ )
# use the precursor for MS2 learning if the median correlation is above the cutoff
use_for_ms2[i] = median_correlation > precursor_correlation_cutoff
- fragment_intensity_view[:] = fragment_intensity_view * (
- fragment_correlation_view > median_correlation * fragment_correlation_ratio
+ # Fix: Use loc to modify the original DataFrame instead of the view
+ spec_lib_base.fragment_intensity_df.loc[start_idx:stop_idx] = (
+ fragment_intensity_view.values
+ * (
+ fragment_correlation_view
+ > median_correlation * fragment_correlation_ratio
+ )
)
spec_lib_base.precursor_df["use_for_ms2"] = use_for_ms2
diff --git a/alphadia/workflow/peptidecentric.py b/alphadia/workflow/peptidecentric.py
index 54d0e8f9..b03c05c0 100644
--- a/alphadia/workflow/peptidecentric.py
+++ b/alphadia/workflow/peptidecentric.py
@@ -260,6 +260,36 @@ def norm_to_rt(
else:
raise ValueError(f"Unknown norm_rt_mode {mode}")
+ def get_precursor_mz_column(self):
+ """Get the precursor m/z column name.
+ This function will return `mz_calibrated` if precursor calibration has happened, otherwise it will return `mz_library`.
+ If no MS1 data is present, it will always return `mz_library`.
+
+ Returns
+ -------
+ str
+ Name of the precursor m/z column
+
+ """
+ return (
+ f"mz_{self.optimization_manager.column_type}"
+ if self.dia_data.has_ms1
+ else "mz_library"
+ )
+
+ def get_fragment_mz_column(self):
+ return f"mz_{self.optimization_manager.column_type}"
+
+ def get_rt_column(self):
+ return f"rt_{self.optimization_manager.column_type}"
+
+ def get_mobility_column(self):
+ return (
+ f"mobility_{self.optimization_manager.column_type}"
+ if self.dia_data.has_mobility
+ else "mobility_library"
+ )
+
def get_ordered_optimizers(self):
"""Select appropriate optimizers. Targeted optimization is used if a valid target value (i.e. a number greater than 0) is specified in the config;
if a value less than or equal to 0 is supplied, automatic optimization is used.
@@ -480,6 +510,7 @@ def search_parameter_optimization(self):
log_string(
"==============================================", verbosity="progress"
)
+
if insufficient_precursors_to_optimize:
precursor_df_filtered, fragments_df_filtered = self.filter_dfs(
precursor_df, self.optlock.fragments_df
@@ -759,14 +790,10 @@ def extract_batch(
batch_precursor_df,
batch_fragment_df,
config.jitclass(),
- rt_column=f"rt_{self.optimization_manager.column_type}",
- mobility_column=f"mobility_{self.optimization_manager.column_type}"
- if self.dia_data.has_mobility
- else "mobility_library",
- precursor_mz_column=f"mz_{self.optimization_manager.column_type}"
- if self.dia_data.has_ms1
- else "mz_library",
- fragment_mz_column=f"mz_{self.optimization_manager.column_type}",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
fwhm_rt=self.optimization_manager.fwhm_rt,
fwhm_mobility=self.optimization_manager.fwhm_mobility,
)
@@ -806,14 +833,10 @@ def extract_batch(
batch_precursor_df,
batch_fragment_df,
config=config,
- rt_column=f"rt_{self.optimization_manager.column_type}",
- mobility_column=f"mobility_{self.optimization_manager.column_type}"
- if self.dia_data.has_mobility
- else "mobility_library",
- precursor_mz_column=f"mz_{self.optimization_manager.column_type}"
- if self.dia_data.has_ms1
- else "mz_library",
- fragment_mz_column=f"mz_{self.optimization_manager.column_type}",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
)
features_df, fragments_df = candidate_scoring(
@@ -1051,12 +1074,10 @@ def requantify(self, psm_df):
self.spectral_library.precursor_df_unfiltered,
self.spectral_library.fragment_df,
config=config,
- precursor_mz_column="mz_calibrated",
- fragment_mz_column="mz_calibrated",
- rt_column="rt_calibrated",
- mobility_column="mobility_calibrated"
- if self.dia_data.has_mobility
- else "mobility_library",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
)
multiplexed_candidates["rank"] = 0
@@ -1144,8 +1165,10 @@ def requantify_fragments(
candidate_speclib_flat.precursor_df,
candidate_speclib_flat.fragment_df,
config=config,
- precursor_mz_column="mz_calibrated",
- fragment_mz_column="mz_calibrated",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
)
# we disregard the precursors, as we want to keep the original scoring from the top12 search
| Transfer Learning extraction fails after calibration failure of file 3/8
**Describe the bug**
Transfer Learning extraction fails after calibration failure of file 3/8.
**To Reproduce**
Linux Ubuntu, AlphaDIA 1.8.1
[log.txt](https://github.com/user-attachments/files/17814527/log.txt)
| 2024-11-22T12:59:01 | 0.0 | [] | [] |
|||
jupyterlab/retrolab | jupyterlab__retrolab-138 | fd73915a598199c0fba4492c9e5f16a2895a8475 | diff --git a/README.md b/README.md
index 887ae428..c1741a4c 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,20 @@ jupyter labextension list
Should also be available when starting `retrolab`.
+### Launching
+
+From an open notebook:
+
+1. Click the RetroLab button in the toolbar; or
+2. View > Open in RetroLab from the menu
+
+To access the main RetroLab tree (file browser):
+
+1. Help > Launch RetroLab File Browser from the menu; or
+2. Go to the /retro URL path of your Jupyter site
+
+## Tour
+
### Files ð and Running Sessions ðâï¸

diff --git a/packages/lab-extension/src/index.ts b/packages/lab-extension/src/index.ts
index 1fffea28..5d5cf53f 100644
--- a/packages/lab-extension/src/index.ts
+++ b/packages/lab-extension/src/index.ts
@@ -15,6 +15,8 @@ import { DocumentRegistry } from '@jupyterlab/docregistry';
import { IMainMenu } from '@jupyterlab/mainmenu';
+import { ITranslator } from '@jupyterlab/translation';
+
import {
INotebookModel,
INotebookTracker,
@@ -35,6 +37,7 @@ namespace CommandIDs {
* Toggle Top Bar visibility
*/
export const openRetro = 'retrolab:open';
+ export const launchRetroTree = 'retrolab:launchtree';
}
/**
@@ -124,9 +127,45 @@ const openRetro: JupyterFrontEndPlugin<void> = {
}
};
+/**
+ * A plugin to add a command to open the RetroLab Tree.
+ */
+const launchRetroTree: JupyterFrontEndPlugin<void> = {
+ id: '@retrolab/lab-extension:launch-retrotree',
+ autoStart: true,
+ requires: [ITranslator],
+ optional: [IMainMenu, ICommandPalette],
+ activate: (
+ app: JupyterFrontEnd,
+ translator: ITranslator,
+ menu: IMainMenu | null,
+ palette: ICommandPalette | null
+ ): void => {
+ const { commands } = app;
+ const trans = translator.load('jupyterlab');
+ const category = trans.__('Help');
+
+ commands.addCommand(CommandIDs.launchRetroTree, {
+ label: trans.__('Launch RetroLab File Browser'),
+ execute: () => {
+ window.open(PageConfig.getBaseUrl() + 'retro/tree');
+ }
+ });
+
+ if (menu) {
+ const helpMenu = menu.helpMenu;
+ helpMenu.addGroup([{ command: CommandIDs.launchRetroTree }], 1);
+ }
+
+ if (palette) {
+ palette.addItem({ command: CommandIDs.launchRetroTree, category });
+ }
+ }
+};
+
/**
* Export the plugins as default.
*/
-const plugins: JupyterFrontEndPlugin<any>[] = [openRetro];
+const plugins: JupyterFrontEndPlugin<any>[] = [launchRetroTree, openRetro];
export default plugins;
diff --git a/retrolab/app.py b/retrolab/app.py
index f17601bc..8b7e2fba 100644
--- a/retrolab/app.py
+++ b/retrolab/app.py
@@ -77,6 +77,12 @@ def get_page_config(self):
return page_config
+class RetroRedirectHandler(RetroHandler):
+ @web.authenticated
+ def get(self):
+ return self.redirect(self.base_url+'retro/tree')
+
+
class RetroTreeHandler(RetroHandler):
@web.authenticated
def get(self, path=None):
@@ -152,6 +158,7 @@ def initialize_handlers(self):
{"url": "/retro/edit/{0}"},
)
)
+ self.handlers.append(("/retro/?", RetroRedirectHandler))
self.handlers.append(("/retro/tree(.*)", RetroTreeHandler))
self.handlers.append(("/retro/notebooks(.*)", RetroNotebookHandler))
self.handlers.append(("/retro/edit(.*)", RetroFileHandler))
| Make it easier to find the Retro tree
Thank you very much for this repo!
Actually, I am looking into using Retro because I want to write automated Javascript tests for our QHub JupyterHub environment, and it is difficult to locate specific UI elements in a predictable manner in regular JupyterLab. Maybe Retro will make that easier...
Anyway, when I first installed it I struggled to actually understand how to access it from JupyterLab. I don't think there is documentation that says exactly what you need to do.
I'd seen your screenshots that were at the /retro/... path so I went to /retro and got 404... It was only by trying /retro/tree exactly that I realised things were working after all, I just needed to be specific on the URL.
So it would be great if at least /retro and /retro/ can redirect to /retro/tree, assuming URLs are the recommended route to access Retro. And either way, just to mention what to do in the docs would be really helpful.
Thanks again.
| Thanks @danlester.
For now the integration lab -> retro is this icon in the notebook toolbar:

Maybe there should be something similar to the "Launch Classic Notebook", but for Retro?

> So it would be great if at least /retro and /retro/ can redirect to /retro/tree
Agree it would be great to have :+1:
Thanks for your thoughts.
I just discovered the button in the notebook toolbar!
Yes, would be good if there was also a link to the retro tree somewhere before you've actually opened a notebook.
It would probably require having a similar plugin as the one in JupyterLab:
https://github.com/jupyterlab/jupyterlab/blob/b8725f0ed99b199c535caba6898f5771832e9da9/packages/help-extension/src/index.tsx#L166-L200
That would live in https://github.com/jtpio/retrolab/blob/main/packages/lab-extension/src/index.ts
In case you would like to give it a shot and open a PR :) | 2021-05-27T11:01:20 | 0.0 | [] | [] |
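A hedged, stand-alone sketch of the redirect behaviour requested above, written against plain Tornado (assumed installed); the real handler in the patch subclasses the Jupyter server's RetroHandler and also requires authentication:
```python
import tornado.ioloop
import tornado.web


class RetroRedirectHandler(tornado.web.RequestHandler):
    def get(self):
        # Send /retro and /retro/ to the file browser instead of returning 404.
        self.redirect("/retro/tree", permanent=False)


def make_app():
    return tornado.web.Application([
        (r"/retro/?", RetroRedirectHandler),
        # (r"/retro/tree(.*)", ...)  # the real tree handler is registered here
    ])


if __name__ == "__main__":
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()
```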
||
tsutaj/statements-manager | tsutaj__statements-manager-132 | 8c739aa783ef0f5411c04ae2d4dad4bf96f22b62 | diff --git a/statements_manager/main.py b/statements_manager/main.py
index 0692b6c..ef4646b 100644
--- a/statements_manager/main.py
+++ b/statements_manager/main.py
@@ -89,7 +89,7 @@ def get_parser() -> argparse.ArgumentParser:
"creds_path",
help="path to credentials file (json)\n"
"how to create credentials file: "
- "see https://github.com/tsutaj/statements-manager/blob/master/README.md#how-to-use",
+ "see https://statements-manager.readthedocs.io/ja/stable/register_credentials.html",
)
return parser
diff --git a/statements_manager/src/manager.py b/statements_manager/src/manager.py
index 73a4853..ea93c6b 100644
--- a/statements_manager/src/manager.py
+++ b/statements_manager/src/manager.py
@@ -115,7 +115,7 @@ def get_docs_contents(self, problem_id: str) -> Tuple[ContentsStatus, str]:
logger.warning(
"tips: try 'ss-manager reg-creds' before running on docs mode.\n"
"how to create credentials file: "
- "see https://github.com/tsutaj/statements-manager/blob/master/README.md#how-to-use"
+ "see https://statements-manager.readthedocs.io/ja/stable/register_credentials.html"
)
return (ContentsStatus.NG, "")
| The links pointed to on errors while processing statements that use Google Docs are outdated
When rendering a problem statement that uses Google Docs fails, an error message with a reference link is printed, but the link is outdated and should be fixed.
| 2023-06-17T21:49:48 | 0.0 | [] | [] |
|||
googleapis/python-db-dtypes-pandas | googleapis__python-db-dtypes-pandas-238 | 87484cd4ecdc3aa33d1786198ae76547a3f1fb9b | diff --git a/noxfile.py b/noxfile.py
index 36c6554..102670a 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -201,13 +201,16 @@ def prerelease(session, tests_path):
"--upgrade",
"pyarrow",
)
+ # Avoid pandas==2.2.0rc0 as this version causes PyArrow to fail. Once newer
+ # prerelease comes out, this constraint can be removed. See
+ # https://github.com/googleapis/python-db-dtypes-pandas/issues/234
session.install(
"--extra-index-url",
"https://pypi.anaconda.org/scipy-wheels-nightly/simple",
"--prefer-binary",
"--pre",
"--upgrade",
- "pandas",
+ "pandas!=2.2.0rc0",
)
session.install(
"mock",
diff --git a/owlbot.py b/owlbot.py
index 4b89096..d1b3c08 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -109,13 +109,16 @@ def prerelease(session, tests_path):
"--upgrade",
"pyarrow",
)
+ # Avoid pandas==2.2.0rc0 as this version causes PyArrow to fail. Once newer
+ # prerelease comes out, this constraint can be removed. See
+ # https://github.com/googleapis/python-db-dtypes-pandas/issues/234
session.install(
"--extra-index-url",
"https://pypi.anaconda.org/scipy-wheels-nightly/simple",
"--prefer-binary",
"--pre",
"--upgrade",
- "pandas",
+ "pandas!=2.2.0rc0",
)
session.install(
"mock",
| tests.unit.test_arrow: many tests failed
Many tests failed at the same time in this package.
* I will close this issue when there are no more failures in this package _and_
there is at least one pass.
* No new issues will be filed for this package until this issue is closed.
* If there are already issues for individual test cases, I will close them when
the corresponding test passes. You can close them earlier, if you prefer, and
I won't reopen them while this issue is still open.
Here are the tests that failed:
* test_series_from_arrow[expected0-pyarrow_array0]
* test_series_from_arrow[expected1-pyarrow_array1]
* test_series_from_arrow[expected2-pyarrow_array2]
* test_series_from_arrow[expected3-pyarrow_array3]
* test_series_from_arrow[expected4-pyarrow_array4]
* test_series_from_arrow[expected5-pyarrow_array5]
* test_series_from_arrow[expected6-pyarrow_array6]
* test_series_from_arrow[expected7-pyarrow_array7]
* test_series_from_arrow[expected8-pyarrow_array8]
* test_series_from_arrow[expected9-pyarrow_array9]
* test_series_from_arrow[expected10-pyarrow_array10]
* test_series_from_arrow[expected11-pyarrow_array11]
* test_series_from_arrow[expected12-pyarrow_array12]
* test_series_from_arrow[expected13-pyarrow_array13]
* test_series_from_arrow[expected14-pyarrow_array14]
* test_series_from_arrow[expected15-pyarrow_array15]
* test_series_from_arrow[expected16-pyarrow_array16]
* test_series_from_arrow[expected17-pyarrow_array17]
* test_series_from_arrow[time-nanoseconds-arrow-round-trip]
* test_series_from_arrow[time-nanoseconds-arrow-from-string] (#120)
* test_dataframe_from_arrow
-----
commit: 12156f4b2560aeae15e299307e871146c79efc38
buildURL: [Build Status](https://source.cloud.google.com/results/invocations/aa201988-b9df-4c1e-9744-d0a3a22fae1b), [Sponge](http://sponge2/aa201988-b9df-4c1e-9744-d0a3a22fae1b)
status: failed
| Looks like this issue is flaky. :worried:
I'm going to leave this open and stop commenting.
A human should fix and close this.
---
When run at the same commit (12156f4b2560aeae15e299307e871146c79efc38), this test passed in one build ([Build Status](https://source.cloud.google.com/results/invocations/aa201988-b9df-4c1e-9744-d0a3a22fae1b), [Sponge](http://sponge2/aa201988-b9df-4c1e-9744-d0a3a22fae1b)) and failed in another build ([Build Status](https://source.cloud.google.com/results/invocations/aa201988-b9df-4c1e-9744-d0a3a22fae1b), [Sponge](http://sponge2/aa201988-b9df-4c1e-9744-d0a3a22fae1b)).
This has something to do with a pandas release candidate `pandas==2.2.0rc0`. In this release candidate, `make_block()` is [deprecated](https://pandas.pydata.org/pandas-docs/version/2.2.0rc0/whatsnew/v2.2.0.html#other-deprecations). It got quickly [reverted](https://github.com/pandas-dev/pandas/pull/56481) for the same issue we are encountering here, as pyarrow is still using this method. We will keep this issue open, wait for the newer pre-release version of pandas to come out, and close this issue when the tests stop failing.
@Linchin Rather than sitting on this and waiting for pandas to change their release candidate (`rc`): I would suggest we update our install instructions to not use this particular `rc` during testing so that this issue can be closed.
* `pandas==2.2.0rc0`
I would be very specific in pinpointing this version versus some sort of greater than equality check so that we don't accidentally prevent it from trying new versions when `2.2.0rc1` OR higher rolls out.
That's a great idea! I'll do this | 2024-01-19T19:33:32 | 0.0 | [] | [] |
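The "pin out exactly one bad prerelease" idea can be sanity-checked with the packaging library (assumed available; it is what pip itself builds on). The specifier excludes 2.2.0rc0 while still allowing later release candidates through:
```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("!=2.2.0rc0")   # the constraint added to the nox sessions

for candidate in ["2.2.0rc0", "2.2.0rc1", "2.2.0", "2.1.4"]:
    # prereleases=True makes the check consider rc versions at all.
    print(candidate, spec.contains(candidate, prereleases=True))
```
Only 2.2.0rc0 prints False, which is why the exact != pin is preferable to an upper bound that would also block 2.2.0rc1 and the final 2.2.0.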
||
elimuinformatics/vcf2fhir | elimuinformatics__vcf2fhir-55 | 672337ea53e28b3b4f53a088263d59bfaab2db24 | diff --git a/vcf2fhir/common.py b/vcf2fhir/common.py
index 9df8239..0027b8f 100644
--- a/vcf2fhir/common.py
+++ b/vcf2fhir/common.py
@@ -4,6 +4,7 @@
import pytz
import logging
import re
+from collections import OrderedDict
general_logger = logging.getLogger("vcf2fhir.general")
@@ -139,3 +140,11 @@ def _error_log_allelicstate(record):
"Cannot Determine AllelicState for: %s , considered sample: %s",
record,
record.samples[0].data)
+
+
+def createOrderedDict(value_from, order):
+ value_to = OrderedDict()
+ for key in order:
+ if key in value_from.keys():
+ value_to[key] = value_from[key]
+ return value_to
diff --git a/vcf2fhir/fhir_helper.py b/vcf2fhir/fhir_helper.py
index 4089a26..87f9714 100644
--- a/vcf2fhir/fhir_helper.py
+++ b/vcf2fhir/fhir_helper.py
@@ -9,10 +9,19 @@
import fhirclient.models.fhirdate as date
import fhirclient.models.range as valRange
import fhirclient.models.medicationstatement as medication
-from collections import OrderedDict
+import numpy as np
from uuid import uuid4
from .common import *
+CG_ORDER = ["system", "code"]
+CODE_ORD = ["system", "code", "display"]
+RS_ORDER = ['resourceType', 'id', 'meta', 'status', 'category', 'code',
+ 'subject', 'component']
+DV_ORDER = ['resourceType', 'id', 'meta', 'status', 'category', 'code',
+ 'subject', 'valueCodeableConcept', 'component']
+SID_ORDER = ['resourceType', 'id', 'meta', 'status', 'category',
+ 'code', 'subject', 'valueCodeableConcept', 'derivedFrom']
+
class _Fhir_Helper:
def __init__(self, patientID):
@@ -459,11 +468,8 @@ def generate_final_json(self):
od["result"] = response['result']
else:
od["result"] = []
- od_code_coding = OrderedDict()
- od_code_coding["system"] = od["code"]["coding"][0]["system"]
- od_code_coding["code"] = od["code"]["coding"][0]["code"]
- od_code_coding["display"] = od["code"]["coding"][0]["display"]
- od["code"]["coding"][0] = od_code_coding
+ od['code']['coding'][0] =\
+ createOrderedDict(od['code']['coding'][0], CODE_ORD)
sidIndex = 0
for index, fhirReport in enumerate(od['contained']):
@@ -487,110 +493,40 @@ def generate_final_json(self):
fhirReport['derivedFrom'] = derivedFrom
for k, i in enumerate(od['contained']):
+ od_contained_k = od['contained'][k]
+ v_c_c = 'valueCodeableConcept'
+
if (i['category'][0]['coding'][0]):
- od_category_coding = OrderedDict()
- temp = i['category'][0]['coding'][0]["system"]
- od_category_coding["system"] = temp
- temp = i['category'][0]['coding'][0]["code"]
- od_category_coding["code"] = temp
- temp = od_category_coding
- od['contained'][k]['category'][0]['coding'][0] = temp
+ od_contained_k['category'][0]['coding'][0] =\
+ createOrderedDict(i['category'][0]['coding'][0], CG_ORDER)
if (i['code']['coding'][0]):
- od_code_coding = OrderedDict()
- od_code_coding["system"] = i['code']['coding'][0]["system"]
- od_code_coding["code"] = i['code']['coding'][0]["code"]
- od_code_coding["display"] = i['code']['coding'][0]["display"]
- od['contained'][k]['code']['coding'][0] = od_code_coding
+ od_contained_k['code']['coding'][0] =\
+ createOrderedDict(i['code']['coding'][0], CODE_ORD)
- if 'valueCodeableConcept' in i.keys():
- od_value_codeable_concept_coding = OrderedDict()
- temp = i['valueCodeableConcept']['coding'][0]["system"]
- od_value_codeable_concept_coding["system"] = temp
- temp = i['valueCodeableConcept']['coding'][0]["code"]
- od_value_codeable_concept_coding["code"] = temp
- temp = i['valueCodeableConcept']['coding'][0]["display"]
- od_value_codeable_concept_coding["display"] = temp
- temp = od_value_codeable_concept_coding
- od['contained'][k]['valueCodeableConcept']['coding'][0] = temp
+ if v_c_c in i.keys():
+ od_contained_k[v_c_c]['coding'][0] =\
+ createOrderedDict(i[v_c_c]['coding'][0], CODE_ORD)
if ((i['id'].startswith('dv-')) or (i['id'].startswith('rs-'))):
for q, j in enumerate(i['component']):
- od_component_code_coding = OrderedDict()
- if j['code']['coding'][0]["system"]:
- temp = j['code']['coding'][0]["system"]
- od_component_code_coding["system"] = temp
- if j['code']['coding'][0]["code"]:
- temp = j['code']['coding'][0]["code"]
- od_component_code_coding["code"] = temp
- if j['code']['coding'][0]["display"]:
- temp = j['code']['coding'][0]["display"]
- od_component_code_coding["display"] = temp
- if od['contained'][k]['component'][q]['code']['coding'][0]:
- temp = od_component_code_coding
- s1 = 'contained'
- s2 = 'component'
- od[s1][k][s2][q]['code']['coding'][0] = temp
+ od_contained_k_component_q = od_contained_k['component'][q]
+ if od_contained_k_component_q['code']['coding'][0]:
+ od_contained_k_component_q['code']['coding'][0] =\
+ createOrderedDict(j['code']['coding'][0], CODE_ORD)
- od_componentvalue_codeable_concept = OrderedDict()
- if 'valueCodeableConcept' in j.keys():
- temp = j['valueCodeableConcept']['coding'][0]["system"]
- od_componentvalue_codeable_concept["system"] = temp
- if 'code' in j['valueCodeableConcept']['coding'][0]\
- .keys(
- ):
- t = j['valueCodeableConcept']['coding'][0]["code"]
- od_componentvalue_codeable_concept["code"] = t
- if 'display' in j['valueCodeableConcept']['coding'][0]\
- .keys(
- ):
- s1 = 'valueCodeableConcept'
- s2 = 'display'
- temp = j[s1]['coding'][0]["display"]
- od_componentvalue_codeable_concept[s2] = temp
- s1 = 'contained'
- s2 = 'component'
- s3 = 'valueCodeableConcept'
- temp = od_componentvalue_codeable_concept
- od[s1][k][s2][q][s3]['coding'][0] = temp
+ if v_c_c in j.keys():
+ od_contained_k_component_q[v_c_c]['coding'][0] =\
+ createOrderedDict(j[v_c_c]['coding'][0], CODE_ORD)
if (i['id'].startswith('rs-')):
- od_RS = OrderedDict()
- od_RS["resourceType"] = i['resourceType']
- od_RS["id"] = i['id']
- od_RS["meta"] = i['meta']
- od_RS["status"] = i['status']
- od_RS["category"] = i['category']
- od_RS["code"] = i['code']
- od_RS["subject"] = i['subject']
- od_RS["component"] = i['component']
- od['contained'][k] = od_RS
+ od['contained'][k] = createOrderedDict(i, RS_ORDER)
if (i['id'].startswith('dv-')):
- od_DV = OrderedDict()
- od_DV["resourceType"] = i['resourceType']
- od_DV["id"] = i['id']
- od_DV["meta"] = i['meta']
- od_DV["status"] = i['status']
- od_DV["category"] = i['category']
- od_DV["code"] = i['code']
- od_DV["subject"] = i['subject']
- od_DV["valueCodeableConcept"] = i['valueCodeableConcept']
- od_DV["component"] = i['component']
- od['contained'][k] = od_DV
+ od['contained'][k] = createOrderedDict(i, DV_ORDER)
if (i['id'].startswith('sid-')):
- od_SID = OrderedDict()
- od_SID["resourceType"] = i['resourceType']
- od_SID["id"] = i['id']
- od_SID["meta"] = i['meta']
- od_SID["status"] = i['status']
- od_SID["category"] = i['category']
- od_SID["code"] = i['code']
- od_SID["subject"] = i['subject']
- od_SID["valueCodeableConcept"] = i['valueCodeableConcept']
- od_SID["derivedFrom"] = i['derivedFrom']
- od['contained'][k] = od_SID
+ od['contained'][k] = createOrderedDict(i, SID_ORDER)
self.fhir_json = od
def export_fhir_json(self, output_filename):
| Simplifying the code
## Prerequisites
- [x] I am running the latest version
- [X] I checked the documentation and found no answer
- [X] I checked to make sure that this issue has not already been filed
## Context
* Package Version: Latest merge
* Operating System: Windows 10
## Current Behavior & Expected Behavior
Example : In fhir_helper.py (line 564)
```python
temp = j['valueCodeableConcept']['coding'][0]["system"]
od_componentvalue_codeable_concept["system"] = temp
```
it can be converted into this
```python
od_componentvalue_codeable_concept["system"] = j['valueCodeableConcept']['coding'][0]["system"]
```
and the same applies to similar lines of code across the project.
| @rhdolin @srgothi92 please tell me if this cannot be considered as an issue or if there something relate to this code that I am not aware of
@abhishek-jain-1999, the code was `od_componentvalue_codeable_concept["system"] = j['valueCodeableConcept']['coding'][0]["system"]` before and I changed it to
`temp = j['valueCodeableConcept']['coding'][0]["system"]`
`od_componentvalue_codeable_concept["system"] = temp`
because according to **PEP 8** we can only have 79 characters in a line.
@Rohan-cod okay, now I understand why there were such changes, but don't you think it is wasteful to create a temp variable only to hand its value off on the next line and then keep it in memory until it is discarded?
In short, memory is used twice and extra time is spent, and this is done in multiple places.
Also, as a code reader it is confusing to see a variable created and then never used later.
like for example
```python
if 'code' in j['valueCodeableConcept']['coding'][0]\
.keys(
):
t = j['valueCodeableConcept']['coding'][0]["code"]
od_componentvalue_codeable_concept["code"] = t
```
this looks much simpler to understand
```python
if 'code' in j['valueCodeableConcept']['coding'][0].keys():
od_componentvalue_codeable_concept["code"] = j['valueCodeableConcept']['coding'][0]["code"]
```
You are absolutely correct @abhishek-jain-1999. If you have any other way to conform to the rule of **having no more than 79 characters in a single line** you can do that, but using `od_componentvalue_codeable_concept["code"] = j['valueCodeableConcept']['coding'][0]["code"]` will cause the checks to fail.
@Rohan-cod Thanks for your response. Let me see if I am able to find another way to handle this issue.
No problem @abhishek-jain-1999. All the best!
@abhishek-jain-1999, I figured out a way to do this. I missed it while I was changing the code.
Changing the code from:
```python
if 'code' in j['valueCodeableConcept']['coding'][0]\
.keys(
):
t = j['valueCodeableConcept']['coding'][0]["code"]
od_componentvalue_codeable_concept["code"] = t
```
to:
```python
if 'code' in j['valueCodeableConcept']['coding'][0]\
.keys(
):
od_componentvalue_codeable_concept["code"] =\
j['valueCodeableConcept']['coding'][0]["code"]
```
and doing the same at other locations will help you remove most of the temporary variables.
Just an option. Completely up to you, if you want you can use my approach.
Nice work everyone, I really like the conversation that is happening in each issues and PR.
Contributors Cheers 🍷
> Nice work everyone, I really like the conversation that is happening in each issues and PR.
>
> Cheers 🍷
Cheers @srgothi92 | 2021-04-09T19:59:39 | 0.0 | [] | [] |
||
EnableSecurity/wafw00f | EnableSecurity__wafw00f-125 | 093195c6f19c2ac656f53142a0a966677efa8826 | diff --git a/wafw00f/main.py b/wafw00f/main.py
old mode 100755
new mode 100644
index d36da6a5..37e14463
--- a/wafw00f/main.py
+++ b/wafw00f/main.py
@@ -137,20 +137,22 @@ def genericdetect(self):
return True
# Checking for the Server header after sending malicious requests
+ normalserver, attackresponse_server = '', ''
response = self.attackres
- normalserver = resp1.headers.get('Server')
- attackresponse_server = response.headers.get('Server')
- if attackresponse_server:
- if attackresponse_server != normalserver:
- self.log.info('Server header changed, WAF possibly detected')
- self.log.debug('Attack response: %s' % attackresponse_server)
- self.log.debug('Normal response: %s' % normalserver)
- reason = reasons[1]
- reason += '\r\nThe server header for a normal response is "%s",' % normalserver
- reason += ' while the server header a response to an attack is "%s",' % attackresponse_server
- self.knowledge['generic']['reason'] = reason
- self.knowledge['generic']['found'] = True
- return True
+ if 'server' in resp1.headers:
+ normalserver = resp1.headers.get('Server')
+ if 'server' in response.headers:
+ attackresponse_server = response.headers.get('Server')
+ if attackresponse_server != normalserver:
+ self.log.info('Server header changed, WAF possibly detected')
+ self.log.debug('Attack response: %s' % attackresponse_server)
+ self.log.debug('Normal response: %s' % normalserver)
+ reason = reasons[1]
+ reason += '\r\nThe server header for a normal response is "%s",' % normalserver
+ reason += ' while the server header a response to an attack is "%s",' % attackresponse_server
+ self.knowledge['generic']['reason'] = reason
+ self.knowledge['generic']['found'] = True
+ return True
# If at all request doesn't go, press F
except RequestBlocked:
@@ -340,7 +342,7 @@ def main():
try:
m = [i.replace(')', '').split(' (') for i in wafdetectionsprio]
print(R+' WAF Name'+' '*24+'Manufacturer\n '+'-'*8+' '*24+'-'*12+'\n')
- max_len = max(len(str(x)) for k in m for x in k)
+ max_len = max(len(str(x)) for k in m for x in k)
for inner in m:
first = True
for elem in inner:
@@ -382,7 +384,7 @@ def main():
elif options.input.endswith('.csv'):
columns = defaultdict(list)
with open(options.input) as f:
- reader = csv.DictReader(f)
+ reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
columns[k].append(v)
@@ -462,7 +464,7 @@ def main():
elif options.output.endswith('.csv'):
log.debug("Exporting data in csv format to file: %s" % (options.output))
with open(options.output, 'w') as outfile:
- csvwriter = csv.writer(outfile, delimiter=',', quotechar='"',
+ csvwriter = csv.writer(outfile, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
count = 0
for result in results:
| AttributeError: 'NoneType' object has no attribute 'headers'
`$ wafw00f http://balancepayout.paypal.com `
```
Traceback (most recent call last):
File "/usr/local/bin/wafw00f", line 4, in <module>
__import__('pkg_resources').run_script('wafw00f==2.1.0', 'wafw00f')
File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 658, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 1438, in run_script
exec(code, namespace, namespace)
File "/usr/local/lib/python3.6/dist-packages/wafw00f-2.1.0-py3.6.egg/EGG-INFO/scripts/wafw00f", line 8, in <module>
main.main()
File "/usr/local/lib/python3.6/dist-packages/wafw00f-2.1.0-py3.6.egg/wafw00f/main.py", line 442, in main
if attacker.genericdetect():
File "/usr/local/lib/python3.6/dist-packages/wafw00f-2.1.0-py3.6.egg/wafw00f/main.py", line 142, in genericdetect
attackresponse_server = response.headers.get('Server')
AttributeError: 'NoneType' object has no attribute 'headers'
```
| Verified that is a bug. Will shortly push a fix to this. | 2021-01-22T16:04:12 | 0.0 | [] | [] |
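A hedged sketch of the defensive comparison, with plain dicts standing in for HTTP response headers rather than the project's actual request objects:
```python
def server_header_changed(normal_headers, attack_headers):
    """Compare Server headers only when both responses actually exist."""
    if normal_headers is None or attack_headers is None:
        # One of the probes produced no response at all, so there is nothing
        # to compare and no attribute access on None to blow up.
        return False
    normal = normal_headers.get("Server", "")
    attacked = attack_headers.get("Server", "")
    return bool(attacked) and attacked != normal


print(server_header_changed({"Server": "nginx"}, None))                 # False
print(server_header_changed({"Server": "nginx"}, {"Server": "BigIP"}))  # True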
||
augerai/a2ml | augerai__a2ml-515 | 2392fb65db622fd6c204f132ee5a63b41aae1a0e | diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index b9fbf172..c92955a7 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -304,7 +304,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
- Iris-setosa
* - Iris-virginica
- Iris-virginica
- * It may also contain train features to retrain while Review
+ * It may also contain train features to retrain while Review(if target missed) and for distribution chart
This method support only one provider
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index a79abb71..ed58746d 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -149,7 +149,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
- Iris-setosa
* - Iris-virginica
- Iris-virginica
- * It may also contain train features to retrain while Review
+ * It may also contain train features to retrain while Review(if target missed) and for distribution chart
This method support only one provider
diff --git a/setup.py b/setup.py
index 22feb59b..bdedc7de 100644
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ def run(self):
'smart_open==1.9.0', # version for azure
'jsonpickle',
'websockets',
- 'liac-arff',
+ 'liac-arff==2.4.0',
'xlrd==1.2.0'
]
| WIP: Move api to auger.ai repo
Moving all underlying auger api code to the auger.ai repo
| 2021-01-26T16:23:07 | 0.0 | [] | [] |
|||
googleapis/python-logging | googleapis__python-logging-848 | 1216cf61b161ed10281842242b711a7b95fea675 | diff --git a/README.rst b/README.rst
index 2618dc37a..84dd1e77f 100644
--- a/README.rst
+++ b/README.rst
@@ -61,8 +61,8 @@ Python >= 3.7
Unsupported Python Versions
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Python == 2.7. The last version of the library compatible with Python 2.7 is `google-cloud-logging==1.15.1`.
-Python == 3.6. The last version of the library compatible with Python 3.6 is `google-cloud-logging==3.1.2`.
+| Python == 2.7. The last version of the library compatible with Python 2.7 is ``google-cloud-logging==1.15.1``.
+| Python == 3.6. The last version of the library compatible with Python 3.6 is ``google-cloud-logging==3.1.2``.
Mac/Linux
diff --git a/docs/std-lib-integration.rst b/docs/std-lib-integration.rst
index a485fce6d..be43231fd 100644
--- a/docs/std-lib-integration.rst
+++ b/docs/std-lib-integration.rst
@@ -44,6 +44,16 @@ There are two supported handler classes to choose from:
to standard out, to be read and parsed by a GCP logging agent
- This is the default handler on Kubernetes Engine, Cloud Functions and Cloud Run
+Handler classes can also be specified via `dictConfig <https://docs.python.org/3/library/logging.config.html#logging-config-dictschema>`_:
+
+.. literalinclude:: ../samples/snippets/usage_guide.py
+ :start-after: [START logging_dict_config]
+ :end-before: [END logging_dict_config]
+ :dedent: 4
+
+Note that since :class:`~google.cloud.logging_v2.handlers.handlers.CloudLoggingHandler` requires an already initialized :class:`~google.cloud.logging_v2.client.Client`,
+you must initialize a client and include it in the dictConfig entry for a `CloudLoggingHandler`.
+
Standard Library
---------------------------
@@ -101,8 +111,7 @@ The following fields are currently supported:
- :ref:`json_fields<JSON>`
.. note::
- Fields marked with "*" require a supported Python web framework. The Google Cloud Logging
- library currently supports `flask <https://flask.palletsprojects.com/>`_ and `django <https://www.djangoproject.com/>`_
+ Fields marked with "*" require a :doc:`supported Python web framework </web-framework-integration>`.
Manual Metadata Using the `extra` Argument
--------------------------------------------
diff --git a/docs/usage.rst b/docs/usage.rst
index 929ee9cef..7541f355b 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -4,6 +4,7 @@ Usage Guide
:maxdepth: 2
std-lib-integration
+ web-framework-integration
direct-lib-usage
grpc-vs-http
diff --git a/docs/web-framework-integration.rst b/docs/web-framework-integration.rst
new file mode 100644
index 000000000..d91d714b3
--- /dev/null
+++ b/docs/web-framework-integration.rst
@@ -0,0 +1,32 @@
+Integration with Python Web Frameworks
+======================================
+
+The Google Cloud Logging library can integrate with Python web frameworks
+`flask <https://flask.palletsprojects.com/>`_ and `django <https://www.djangoproject.com/>`_ to
+automatically populate `LogEntry fields <https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry>`_
+`trace`, `span_id`, `trace_sampled`, and `http_request`.
+
+Django
+------
+
+Django integration has been tested to work with each of the Django/Python versions listed `here <https://docs.djangoproject.com/en/stable/faq/install/#what-python-version-can-i-use-with-django>`_.
+To enable Django integration, add `google.cloud.logging_v2.handlers.middleware.RequestMiddleware` to the list of `MIDDLEWARE`
+in your `settings <https://docs.djangoproject.com/en/stable/topics/settings/>`_ file. Also be sure to :doc:`set up logging </std-lib-integration>` in your settings file.
+
+Flask
+-----
+
+Flask integration has been tested to work with the following versions of Flask:
+
+=============== ==============
+Python version Flask versions
+=============== ==============
+3.7 >=1.0.0
+3.8 >=1.0.0
+3.9 >=1.0.0
+3.10 >=1.0.3
+3.11 >=1.0.3
+3.12 >=1.0.3
+=============== ==============
+
+Be sure to :doc:`set up logging </std-lib-integration>` before declaring the Flask app.
diff --git a/google/cloud/logging_v2/handlers/_helpers.py b/google/cloud/logging_v2/handlers/_helpers.py
index 43678ed0d..f0c301ceb 100644
--- a/google/cloud/logging_v2/handlers/_helpers.py
+++ b/google/cloud/logging_v2/handlers/_helpers.py
@@ -66,7 +66,7 @@ def get_request_data_from_flask():
Returns:
Tuple[Optional[dict], Optional[str], Optional[str], bool]:
Data related to the current http request, trace_id, span_id and trace_sampled
- for the request. All fields will be None if a django request isn't found.
+ for the request. All fields will be None if a Flask request isn't found.
"""
if flask is None or not flask.request:
return None, None, None, False
diff --git a/samples/snippets/usage_guide.py b/samples/snippets/usage_guide.py
index 5c9e86990..f4292a9de 100644
--- a/samples/snippets/usage_guide.py
+++ b/samples/snippets/usage_guide.py
@@ -484,6 +484,37 @@ def setup_logging(client):
# [END setup_logging_excludes]
+@snippet
+def logging_dict_config(client):
+ import logging.config
+
+ # [START logging_dict_config]
+ import google.cloud.logging
+
+ client = google.cloud.logging.Client()
+
+ LOGGING = {
+ "version": 1,
+ "handlers": {
+ "cloud_logging": {
+ "class": "google.cloud.logging.handlers.CloudLoggingHandler",
+ "client": client,
+ },
+ "structured_log": {
+ "class": "google.cloud.logging.handlers.StructuredLogHandler"
+ },
+ },
+ "root": {"handlers": ["console"], "level": "WARNING"},
+ "loggers": {
+ "my_logger": {"handlers": ["cloud_logging"], "level": "INFO"},
+ "my_other_logger": {"handlers": ["structured_log"], "level": "INFO"},
+ },
+ }
+ # [END logging_dict_config]
+
+ logging.config.dictConfig(LOGGING)
+
+
def _line_no(func):
return func.__code__.co_firstlineno
| document django middleware
The django middleware isn't documented anywhere as far as I can see.
It needs to be documented here: https://cloud.google.com/logging/docs/setup/python#write_logs_with_the_standard_python_logging_handler
There are lots of people reporting the logs don't have an http info and this could certainly be a root cause.
Perhaps it was documented at one point? See this issue here:
https://github.com/googleapis/python-logging/issues/677
Or maybe we're all just looking through the source.
Thanks.
| @maclek I am currently working on documenting Django integrations. Is there anything else that you have found that needs to be done in order to get them to work, other than adding the middleware to `settings.py`? | 2024-02-01T20:05:09 | 0.0 | [] | [] |
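A hedged sketch of the Django wiring that the eventual documentation describes: register the middleware in settings.py and attach the Cloud Logging handler (assumes google-cloud-logging is installed and application credentials are available; the rest of the MIDDLEWARE list is illustrative):
```python
# settings.py (excerpt)
import google.cloud.logging

MIDDLEWARE = [
    "django.middleware.common.CommonMiddleware",
    # Populates trace, span_id and http_request on log entries for each request:
    "google.cloud.logging_v2.handlers.middleware.RequestMiddleware",
]

# Attach the Cloud Logging handler to the root logger at startup.
client = google.cloud.logging.Client()
client.setup_logging()
```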
||
bo4e/BO4E-python | bo4e__BO4E-python-502 | b5597da4f01fa4549dba6cdcdbe7bc54d39dcf72 | diff --git a/src/bo4e/bo/messlokation.py b/src/bo4e/bo/messlokation.py
index bcbbdb12e..4bee7da4e 100644
--- a/src/bo4e/bo/messlokation.py
+++ b/src/bo4e/bo/messlokation.py
@@ -3,7 +3,7 @@
and corresponding marshmallow schema for de-/serialization
"""
import re
-from typing import Annotated, List, Optional
+from typing import Annotated, Any, List, Optional
from iso3166 import countries
from pydantic import Field, field_validator, model_validator
@@ -118,10 +118,14 @@ def _validate_messlokations_id_country_code(cls, messlokations_id: str) -> str:
"Checks that if an address is given, that there is only one valid address given"
# pylint: disable=no-self-argument
- @model_validator(mode="after") # type:ignore[arg-type]
+ @model_validator(mode="before")
@classmethod
- def validate_grundzustaendiger_x_codenr(cls, model: "Messlokation") -> "Messlokation":
+ def validate_grundzustaendiger_x_codenr(cls, data: Any) -> dict[str, Any]:
"""Checks that if a codenr is given, that there is only one valid codenr given."""
- if model.grundzustaendiger_msb_codenr is not None and model.grundzustaendiger_msbim_codenr is not None:
+ assert isinstance(data, dict), "data is not a dict"
+ if (
+ data.get("grundzustaendiger_msb_codenr", None) is not None
+ and data.get("grundzustaendiger_msbim_codenr", None) is not None
+ ):
raise ValueError("More than one codenr is given.")
- return model
+ return data
diff --git a/src/bo4e/validators.py b/src/bo4e/validators.py
index eec866803..14fe3e9b2 100644
--- a/src/bo4e/validators.py
+++ b/src/bo4e/validators.py
@@ -45,15 +45,15 @@ def combinations_of_fields(
def supplied(value: Any) -> bool:
return value is not None and (not isinstance(value, str) or value != "")
- def validator(self: ModelT) -> ModelT:
- bools = tuple(int(supplied(getattr(self, field))) for field in fields)
+ def validator(cls: type[ModelT], data: dict[str, Any]) -> dict[str, Any]:
+ bools = tuple(int(supplied(data.get(field, None))) for field in fields)
if bools in valid_combinations:
- return self
+ return data
if custom_error_message:
raise ValueError(custom_error_message)
- raise ValueError(f"Invalid combination of fields {fields} for {self!r}: {bools}")
+ raise ValueError(f"Invalid combination of fields {fields} for {cls!r}: {bools}")
- return model_validator(mode="after")(validator)
+ return model_validator(mode="before")(validator)
# pylint:disable=unused-argument
| Change `model_validator(mode="after")` to `model_validator(mode="before")`
This is useful for projects using this library, especially for unit tests, if you are using `model_construct` a lot. This is actually due to a bug (or unintended side effect), see https://github.com/pydantic/pydantic/issues/6978.
For instance, this helps to resolve issues in PR https://github.com/Hochfrequenz/powercloud2lynqtech/pull/1816.
Otherwise, you would have to use `model_construct` on every nested structure.
| 2023-08-07T21:39:21 | 0.0 | [] | [] |
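A minimal, hedged sketch of the distinction the issue relies on (pydantic v2 assumed; the model and field names are made up, not the real BO4E ones): a mode="before" validator receives the raw input, typically a dict, before any model instance exists, so cross-field checks keep working even when nested objects are built without full validation.
```python
from typing import Any, Optional

from pydantic import BaseModel, ValidationError, model_validator


class Location(BaseModel):
    msb_codenr: Optional[str] = None
    msbim_codenr: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def only_one_codenr(cls, data: Any) -> Any:
        # Works on the raw dict, not on an already-constructed model.
        if isinstance(data, dict) and data.get("msb_codenr") and data.get("msbim_codenr"):
            raise ValueError("More than one codenr is given.")
        return data


print(Location(msb_codenr="123"))
try:
    Location(msb_codenr="1", msbim_codenr="2")
except ValidationError as exc:
    print(exc.error_count(), "validation error")
```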
|||
learnables/learn2learn | learnables__learn2learn-403 | 39db32f25b91778beceee19624f8f98709deb78e | diff --git a/CHANGELOG.md b/CHANGELOG.md
index e4dee544..4d21350d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Example for `detach_module`. ([Nimish Sanghi](https://github.com/nsanghi))
* Loading duplicate FGVC Aircraft images.
* Move vision datasets to Zenodo. (mini-ImageNet, tiered-ImageNet, FC100, CIFAR-FS)
+* mini-ImageNet targets are now ints (not np.float64).
## v0.1.7
diff --git a/learn2learn/vision/datasets/cifarfs.py b/learn2learn/vision/datasets/cifarfs.py
index 3eea95a7..177ce5f2 100644
--- a/learn2learn/vision/datasets/cifarfs.py
+++ b/learn2learn/vision/datasets/cifarfs.py
@@ -95,7 +95,7 @@ def _download(self):
with zipfile.ZipFile(zip_file, 'r') as zfile:
zfile.extractall(self.raw_path)
os.remove(zip_file)
- except:
+ except Exception:
download_file_from_google_drive('1pTsCCMDj45kzFYgrnO67BWVbKs48Q3NI',
zip_file)
with zipfile.ZipFile(zip_file, 'r') as zfile:
diff --git a/learn2learn/vision/datasets/fc100.py b/learn2learn/vision/datasets/fc100.py
index 325ba3f0..abc7b543 100644
--- a/learn2learn/vision/datasets/fc100.py
+++ b/learn2learn/vision/datasets/fc100.py
@@ -93,7 +93,7 @@ def download(self):
archive_file = zipfile.ZipFile(archive_path)
archive_file.extractall(self.root)
os.remove(archive_path)
- except:
+ except Exception:
try: # Download from Google Drive first
download_file_from_google_drive(FC100.GOOGLE_DRIVE_FILE_ID,
archive_path)
diff --git a/learn2learn/vision/datasets/mini_imagenet.py b/learn2learn/vision/datasets/mini_imagenet.py
index 8234e551..f039e2d9 100644
--- a/learn2learn/vision/datasets/mini_imagenet.py
+++ b/learn2learn/vision/datasets/mini_imagenet.py
@@ -109,7 +109,7 @@ def __init__(
download_file(dropbox_file_link, pickle_file)
with open(pickle_file, 'rb') as f:
self.data = pickle.load(f)
- except:
+ except Exception:
try:
if not self._check_exists() and download:
print('Downloading mini-ImageNet --', mode)
@@ -136,7 +136,7 @@ def __getitem__(self, idx):
data = self.x[idx]
if self.transform:
data = self.transform(data)
- return data, self.y[idx]
+ return data, int(self.y[idx])
def __len__(self):
return len(self.x)
diff --git a/learn2learn/vision/datasets/tiered_imagenet.py b/learn2learn/vision/datasets/tiered_imagenet.py
index 6a7ee691..c7a09419 100644
--- a/learn2learn/vision/datasets/tiered_imagenet.py
+++ b/learn2learn/vision/datasets/tiered_imagenet.py
@@ -105,7 +105,7 @@ def download(self, file_id, destination):
source=file_url,
destination=file_dest,
)
- except:
+ except Exception:
archive_path = os.path.join(destination, 'tiered_imagenet.tar')
download_file_from_google_drive(file_id, archive_path)
archive_file = tarfile.open(archive_path)
| Return pytorch tensor for mini-imagenet labels?
https://github.com/learnables/learn2learn/blob/06893e847693a0227d5f35a6e065e6161bb08201/learn2learn/vision/datasets/mini_imagenet.py#L111
Currently, when loading mini-imagenet the inputs are returned as pytorch tensors while the labels are returned as numpy arrays. Since the user will likely use both in a training loop, does it make sense to cast the labels to long pytorch tensors?
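One hedged, user-side workaround (independent of the library-side fix in the patch; the DataLoader wiring shown is an assumption, not learn2learn API) is to cast labels in a collate function:
```python
import torch

def cast_labels(batch):
    """Collate helper: stack image tensors and force labels to a long tensor."""
    images, labels = zip(*batch)
    # assumes images are already torch tensors (e.g. via a ToTensor-style transform)
    return torch.stack(images), torch.tensor([int(l) for l in labels], dtype=torch.long)

# hypothetical usage:
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=cast_labels)
```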
| Thanks for spotting this and the docstring (#258) issue in mini-ImageNet @pietrolesci. Yes, both should be tensors.
In fact, let me piggy-back and mention one more issue: in the benchmarks, mini-ImageNet and tiered-ImageNet return samples with different input ranges, with `data_augmentation=None`. (Tiered is 0-1 but mini is 0-255.) Ideally, all vision benchmarks would have the same ranges and types by default, probably 0-255 uint8 for images.
Hi @seba-1511, thanks for picking this up and for your answer :) | 2023-05-29T10:25:46 | 0.0 | [] | [] |
||
nlpsuge/xsession-manager | nlpsuge__xsession-manager-33 | e6bf5a9fa279c9f4ef99c6ac672ea96d4175519a | diff --git a/xsession_manager/arguments_handler.py b/xsession_manager/arguments_handler.py
index b023809..95fc754 100755
--- a/xsession_manager/arguments_handler.py
+++ b/xsession_manager/arguments_handler.py
@@ -170,7 +170,7 @@ def handle_arguments(self):
try:
file_path = Path(constants.Locations.BASE_LOCATION_OF_SESSIONS, file)
with open(file_path, 'r') as f:
- num = num + 1
+ num += 1
namespace_objs: XSessionConfig = json.load(f, object_hook=lambda d: Namespace(**d))
print(str(num) +'. ' + namespace_objs.session_name,
namespace_objs.session_create_time,
@@ -201,7 +201,7 @@ def handle_arguments(self):
# Print data according to declared order
ordered_variables = vars(XSessionConfigObject)['__annotations__']
for x_session_config_object in x_session_config_objects:
- count = count + 1
+ count += 1
print('%d.' % count)
# Get fields in declared order
diff --git a/xsession_manager/xsession_manager.py b/xsession_manager/xsession_manager.py
index ed8777e..f4ead2f 100755
--- a/xsession_manager/xsession_manager.py
+++ b/xsession_manager/xsession_manager.py
@@ -233,7 +233,7 @@ def _move_windows_while_restore(self, session_name, x_session_config_objects_cop
while Gtk.events_pending():
Gtk.main_iteration()
- retry_count_down = retry_count_down - 1
+ retry_count_down -= 1
self._suppress_log_if_already_in_workspace = True
self.move_window(session_name)
@@ -281,7 +281,7 @@ def _restore_sessions(self,
running_restores.append(index)
is_running = True
with self.instance_lock:
- self.restore_app_countdown = self.restore_app_countdown - 1
+ self.restore_app_countdown -= 1
break
if is_running:
continue
| Convert four assignment statements to the usage of augmented operators
:eyes: Some source code analysis tools can help to find opportunities for improving software components.
:thought_balloon: I propose to [increase the usage of augmented assignment statements](https://docs.python.org/3/reference/simple_stmts.html#augmented-assignment-statements "Augmented assignment statements") accordingly.
```diff
diff --git a/xsession_manager/arguments_handler.py b/xsession_manager/arguments_handler.py
index b023809..95fc754 100755
--- a/xsession_manager/arguments_handler.py
+++ b/xsession_manager/arguments_handler.py
@@ -170,7 +170,7 @@ class ArgumentsHandler():
try:
file_path = Path(constants.Locations.BASE_LOCATION_OF_SESSIONS, file)
with open(file_path, 'r') as f:
- num = num + 1
+ num += 1
namespace_objs: XSessionConfig = json.load(f, object_hook=lambda d: Namespace(**d))
print(str(num) +'. ' + namespace_objs.session_name,
namespace_objs.session_create_time,
@@ -201,7 +201,7 @@ class ArgumentsHandler():
# Print data according to declared order
ordered_variables = vars(XSessionConfigObject)['__annotations__']
for x_session_config_object in x_session_config_objects:
- count = count + 1
+ count += 1
print('%d.' % count)
# Get fields in declared order
diff --git a/xsession_manager/xsession_manager.py b/xsession_manager/xsession_manager.py
index ed8777e..f4ead2f 100755
--- a/xsession_manager/xsession_manager.py
+++ b/xsession_manager/xsession_manager.py
@@ -233,7 +233,7 @@ class XSessionManager:
while Gtk.events_pending():
Gtk.main_iteration()
- retry_count_down = retry_count_down - 1
+ retry_count_down -= 1
self._suppress_log_if_already_in_workspace = True
self.move_window(session_name)
@@ -281,7 +281,7 @@ class XSessionManager:
running_restores.append(index)
is_running = True
with self.instance_lock:
- self.restore_app_countdown = self.restore_app_countdown - 1
+ self.restore_app_countdown -= 1
break
if is_running:
continue
```
| Hi,
Sorry for late reply.
Thanks for the suggestion.
Would you please submit a PR?
:thought_balloon: Can the chances grow to integrate the shown small source code adjustments also directly (with support from other contributors eventually)?
> 💭 Can the chances grow to integrate the shown small source code adjustments also directly (with support from other contributors eventually)?
@elfring
I think it is OK to merge your PR to the main branch directly. I'll test it before merging anyway.
Please correct me if I understand you wrongly.
Does this feedback mean that you would like to integrate the change suggestion directly (without an extra pull/merge request from me)?
@elfring No, I'd like you commit a PR. :)
I'll merge your code later :) | 2022-08-07T13:15:01 | 0.0 | [] | []
||
adafruit/Adafruit_CircuitPython_SCD4X | adafruit__Adafruit_CircuitPython_SCD4X-12 | ea17ea7cddccf7452ce98f6628147bf06c4cf351 | diff --git a/adafruit_scd4x.py b/adafruit_scd4x.py
index 3f5bc32..22ed6b8 100644
--- a/adafruit_scd4x.py
+++ b/adafruit_scd4x.py
@@ -208,7 +208,7 @@ def data_ready(self):
"""Check the sensor to see if new data is available"""
self._send_command(_SCD4X_DATAREADY, cmd_delay=0.001)
self._read_reply(self._buffer, 3)
- return not ((self._buffer[0] & 0x03 == 0) and (self._buffer[1] == 0))
+ return not ((self._buffer[0] & 0x07 == 0) and (self._buffer[1] == 0))
@property
def serial_number(self):
| get_data_ready_status should watch 11 bits (not 10)
Section 3.8.2 of the SCD4x datasheet says to check the least significant 11 bits of the returned status. But the code on line 211 in "data_ready" only checks 8+2 = 10 bits. I've never ever seen that unchecked bit get set when data was not ready, but just in case it ever does -- we should AND the MSB with 7 (3 bits + 8) instead of 3 (2 bits + 8) to follow the datasheet.
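For clarity, a small sketch of the masking arithmetic (variable names are illustrative, not from the driver): the least-significant 11 bits of the 16-bit status word are 3 bits of the high byte plus all 8 bits of the low byte, hence masking with 0x07 rather than 0x03.
```python
buffer = [0x80, 0x06]                     # example status bytes only (MSB, LSB)
msb, lsb = buffer[0], buffer[1]
status_11bit = ((msb & 0x07) << 8) | lsb  # 3 + 8 = 11 bits, per datasheet section 3.8.2
data_ready = status_11bit != 0
print(data_ready)
```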
| That's a good catch. Thank you for the really direct references, it was really easy to look up and see. I tried going through the Sensirion provided c code to see what they use, but c is not my specialty. I've got a sensor so I should be able to give it a shot regardless
! can y'all submit a PR?
Yup! I should have one in tonight | 2022-01-21T06:03:02 | 0.0 | [] | [] |
||
ContextLab/davos | ContextLab__davos-95 | 4a3be140f5b77e8e01cdc964bf07972a2c9afbd5 | diff --git a/davos/core/project.py b/davos/core/project.py
index a4b39207..f86cc65a 100644
--- a/davos/core/project.py
+++ b/davos/core/project.py
@@ -49,6 +49,7 @@
import os
import shutil
import sys
+import warnings
from os.path import expandvars
from pathlib import Path
from urllib.request import urlopen
@@ -202,7 +203,7 @@ def _refresh_installed_pkgs(self):
cmd = (
f'{config.pip_executable} list '
'--disable-pip-version-check '
- f'--path {self.site_packages_dir} '
+ f'--path "{self.site_packages_dir}" '
f'--format json'
)
pip_list_stdout = run_shell_command(cmd, live_stdout=False)
@@ -678,6 +679,15 @@ def get_notebook_path():
notebook_relpath = unquote(session['notebook']['path'])
return f'{nbserver_root_dir}/{notebook_relpath}'
+ # VS Code doesn't actually start a Jupyter server when connecting to
+ # kernels, so the Jupyter API won't work. Fortunately, it's easy to
+ # check if the notebook is being run through VS Code, and to get its
+ # absolute path, if so.
+ # environment variable defined only if running in VS Code
+ if os.getenv('VSCODE_PID') is not None:
+ # global variable that holds absolute path to notebook file
+ return config.ipython_shell.user_ns['__vsc_ipynb_file__']
+
# shouldn't ever get here, but just in case
raise RuntimeError("Could not find notebook path for current kernel")
@@ -855,10 +865,24 @@ def use_default_project():
if isinstance(config._ipython_shell, TerminalInteractiveShell):
proj_name = "ipython-shell"
else:
- proj_name = get_notebook_path()
+ try:
+ proj_name = get_notebook_path()
+ except RuntimeError:
+ # failed to identify the notebook's name/path for some
+ # reason. This may happen if the notebook is being run
+ # through an IDE or other application that accesses the
+ # notebook kernel in a non-standard way, such that the
+ # Jupyter server is never launched. In this case, fall back
+ # to a generic project so smuggled packages are still
+ # isolated from the user's main environment
+ proj_name = "davos-fallback"
+ warnings.warn(
+ "Failed to identify notebook path. Falling back to generic "
+ "default project"
+ )
# will always be an absolute path to a real Jupyter notebook file,
- # or name of real Colab notebook, so we can skip project type
- # decision logic
+ # name of real Colab notebook, or one of the non-path strings
+ # explicitly set above, so we can skip project type decision logic
default_project = ConcreteProject(proj_name)
config.project = default_project
diff --git a/paper/main.tex b/paper/main.tex
index 6f69c84e..61896d69 100644
--- a/paper/main.tex
+++ b/paper/main.tex
@@ -389,7 +389,7 @@ \subsubsection{Projects}\label{subsec:projects}
Standard approaches to installing packages from within a notebook can alter the local Python environment in potentially unexpected and undesired ways. For example, running a notebook that installs its dependencies via system shell commands (prefixed with ``\texttt{!}'') or IPython magic commands (prefixed with ``\texttt{\%}'') may cause other existing packages in the user's environment to be uninstalled and replaced with alternate versions. This can lead to incompatibilities between installed packages, affect the behavior of the user's other scripts or notebooks, or even interfere with system applications.
-To prevent Davos-enhanced notebooks from having unwanted side-effects on the user's environment, Davos automatically isolates packages installed via \texttt{smuggle} statements using a custom scheme called ``projects.'' Functionally, a Davos project is similar to a standard Python virtual environment (e.g., created with the standard library's \texttt{venv} module or a third-party tool like \texttt{virtualenv}~\cite{BickEtal07}): it consists of a directory (within a hidden \texttt{.davos} folder in the user's home directory) that houses third-party packages needed for a particular project or task. However, Davos projects do not need to be manually activated and deactivated, do not contain separate Python or \texttt{pip} executables, and \textit{extend} the user's main Python environment rather than replace it.
+To prevent Davos-enhanced notebooks from having unwanted side-effects on the user's environment, Davos automatically isolates packages installed via \texttt{smuggle} statements using a custom scheme called ``projects.'' Functionally, a Davos project is similar to a standard Python virtual environment (e.g., created with the standard library's \texttt{venv} module or a third-party tool like \texttt{virtualenv}~\cite{BickEtal07}): it consists of a directory (within a hidden \texttt{.davos} folder in the user's home directory) that houses third-party packages needed for a particular project or task. However, unlike standard virtual environments, Davos projects do not need to be manually activated and deactivated, do not contain separate Python or \texttt{pip} executables, and \textit{extend} the user's main Python environment rather than replace it.
When Davos is imported into a notebook, a notebook-specific project directory is automatically created (if it does not exist already).
%When Davos is imported into a notebook, a notebook-specific project directory is automatically created (if it does not exist already), named for the absolute path to the notebook file.
@@ -537,7 +537,7 @@ \subsubsection{Configuring and querying Davos}\label{subsec:config}
program throws an error, both its stdout and stderr streams will be
displayed alongside the Python traceback to allow for debugging.
-\item \texttt{.project}: This attribute is a string that specifies the name of
+\item \texttt{.project}: \textcolor{red}{\textbf{TODO: fix this}} This attribute is a string that specifies the name of
the ``project'' associated with the current notebook. As described in
Section~\ref{subsec:projects}, a notebook's project determines where and how
any \texttt{smuggle}d dependencies are installed if they are not available in
diff --git a/setup.cfg b/setup.cfg
index 07da349b..53298318 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = davos
-version = 0.2.2
+version = 0.2.3
description = Install and manage Python packages at runtime using the "smuggle" statement.
long_description = file: README.md
long_description_content_type = text/markdown
| `get_notebook_path` doesn't work when running with VSCode
I was attempting to run some Jupyter notebooks locally (testing Chatify for Neuromatch), and found that `davos` can't be imported if the notebooks are run in VSCode. The `get_notebook_path` function does not work with VSCode as the notebooks do not appear when running the command `jupyter notebook list`.
The error is: `RuntimeError: Could not find notebook path for current kernel`.
I appreciate this is probably not a pressing concern, but might be good to note, particularly to Neuromatch students.
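For reference, a minimal standalone sketch of the detection idea the patch uses; the `VSCODE_PID` environment variable and the `__vsc_ipynb_file__` global are set by VS Code's Jupyter integration, and treating them as stable is an assumption to verify against your VS Code version.
```python
import os
from IPython import get_ipython

def vscode_notebook_path():
    """Return the notebook's absolute path when running under VS Code, else None."""
    if os.getenv("VSCODE_PID") is None:
        return None            # not running inside VS Code
    shell = get_ipython()
    if shell is None:
        return None            # not an IPython kernel at all
    return shell.user_ns.get("__vsc_ipynb_file__")
```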
| thanks @le-big-mac --- we're on it! we were able to replicate this issue in VS Code and we're working on a plan for handling it. | 2023-09-06T04:34:44 | 0.0 | [] | [] |
||
adafruit/circup | adafruit__circup-205 | 38dd524b453aaab1a6f12507ea77493440838606 | diff --git a/circup/backends.py b/circup/backends.py
index 754cbac..4e2d1b7 100644
--- a/circup/backends.py
+++ b/circup/backends.py
@@ -589,14 +589,31 @@ def get_file_path(self, filename):
def is_device_present(self):
"""
- returns True if the device is currently connected
+ returns True if the device is currently connected and running supported version
"""
try:
- _ = self.session.get(f"{self.device_location}/cp/version.json")
- return True
+ with self.session.get(f"{self.device_location}/cp/version.json") as r:
+ r.raise_for_status()
+ web_api_version = r.json().get("web_api_version")
+ if web_api_version is None:
+ self.logger.error("Unable to get web API version from device.")
+ click.secho("Unable to get web API version from device.", fg="red")
+ return False
+
+ if web_api_version < 4:
+ self.logger.error(
+ f"Device running unsupported web API version {web_api_version} < 4."
+ )
+ click.secho(
+ f"Device running unsupported web API version {web_api_version} < 4.",
+ fg="red",
+ )
+ return False
except requests.exceptions.ConnectionError:
return False
+ return True
+
def get_device_versions(self):
"""
Returns a dictionary of metadata from modules on the connected device.
| web workflow does not work with CP 8.2.x due to API differences
Trying web workflow against ESP32 Feather V2 running CP 8.2.10, freshly installed:
```
$ ./venv/bin/circup --verbose --host 172.40.0.11 --password XXX list
Logging to /Users/foo/Library/Logs/circup/circup.log
03/06/2024 22:03:23 INFO: ### Started Circup ###
03/06/2024 22:03:23 INFO: Checking for a newer version of circup
03/06/2024 22:03:23 INFO: Requesting redirect information: https://github.com/adafruit/circuitpython/releases/latest
03/06/2024 22:03:23 INFO: Tag: '8.2.10'
Found device at http://:[email protected], running CircuitPython 8.2.10.
03/06/2024 22:03:24 INFO: List
03/06/2024 22:03:24 INFO: Using bundles: adafruit/Adafruit_CircuitPython_Bundle, adafruit/CircuitPython_Community_Bundle, circuitpython/CircuitPython_Org_Bundle
03/06/2024 22:03:24 ERROR: list indices must be integers or slices, not str
Traceback (most recent call last):
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/__init__.py", line 598, in find_modules
device_modules = backend.get_device_versions()
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 608, in get_device_versions
return self.get_modules(urljoin(self.device_location, self.LIB_DIR_PATH))
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 64, in get_modules
return self._get_modules(device_url)
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 367, in _get_modules
return self._get_modules_http(device_lib_path)
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 388, in _get_modules_http
for entry in r.json()["files"]:
TypeError: list indices must be integers or slices, not str
There was a problem: list indices must be integers or slices, not str
```
Checking the traffic dump the JSON returned for the request is merely an array.
It works with CP 9.0.0 beta2.
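A minimal sketch of the guard the patch introduces (the endpoint path and `web_api_version` field are taken from the patch; the rest of the wiring is assumed):
```python
import requests

def device_supported(device_location, session=None):
    """True only if /cp/version.json is reachable and reports web_api_version >= 4."""
    s = session or requests.Session()
    try:
        r = s.get(f"{device_location}/cp/version.json")
        r.raise_for_status()
    except requests.exceptions.RequestException:
        return False
    version = r.json().get("web_api_version")
    return version is not None and version >= 4
```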
| I wonder if the web workflow API is documented somewhere.
https://docs.circuitpython.org/en/latest/docs/workflows.html#web
Thanks. I almost forgot that I used that for the initial set of changes of the web workflow support.
On Thu, Mar 7, 2024, at 14:36, anecdata wrote:
>
>
> https://docs.circuitpython.org/en/latest/docs/workflows.html#web
>
>
> â
> Reply to this email directly, view it on GitHub <https://github.com/adafruit/circup/issues/204#issuecomment-1983523831>, or unsubscribe <https://github.com/notifications/unsubscribe-auth/AAWMMDBNWQUIJFPDJ3CKFPTYXBUPFAVCNFSM6AAAAABEJZ5ADOVHI2DSMVQWIX3LMV43OSLTON2WKQ3PNVWWK3TUHMYTSOBTGUZDGOBTGE>.
> You are receiving this because you authored the thread.Message ID: ***@***.***>
>
The difference is that CP 8.2.10 has `web_api_version` = 2 and returns `[]` on empty directory listing, while CP 9.0.0 has `web_api_version` = 4 and returns dictionary/object with the `files` value being empty array.
I think the answer to this is to check the API version and refuse anything strictly lower than 4. | 2024-03-07T20:41:57 | 0.0 | [] | [] |
||
ami-iit/adam | ami-iit__adam-63 | 4f36ed48c6c244b19318177906e0cb6ac9634332 | diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
index 8ba32be..41be665 100644
--- a/.github/workflows/black.yml
+++ b/.github/workflows/black.yml
@@ -1,6 +1,7 @@
name: Black action
on:
+ pull_request:
push:
branches:
- main
@@ -9,11 +10,11 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: black
- uses: lgeiger/[email protected]
+ uses: psf/black@stable
with:
- args: .
+ options: "--check --verbose"
- name: Check for modified files
id: git-check
run: echo ::set-output name=modified::$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)
diff --git a/src/adam/model/std_factories/std_model.py b/src/adam/model/std_factories/std_model.py
index 41d8af6..ffeff71 100644
--- a/src/adam/model/std_factories/std_model.py
+++ b/src/adam/model/std_factories/std_model.py
@@ -7,11 +7,12 @@
from adam.core.spatial_math import SpatialMath
from adam.model import ModelFactory, StdJoint, StdLink
+
def urdf_remove_sensors_tags(xml_string):
# Parse the XML string
root = ET.fromstring(xml_string)
- # Find and remove all tags named "sensor" that are child of
+ # Find and remove all tags named "sensor" that are child of
# root node (i.e. robot)
for sensors_tag in root.findall("sensor"):
root.remove(sensors_tag)
@@ -21,6 +22,7 @@ def urdf_remove_sensors_tags(xml_string):
return modified_xml_string
+
class URDFModelFactory(ModelFactory):
"""This factory generates robot elements from urdf_parser_py
@@ -36,17 +38,19 @@ def __init__(self, path: str, math: SpatialMath):
raise FileExistsError(path)
# Read URDF, but before passing it to urdf_parser_py get rid of all sensor tags
- # sensor tags are valid elements of URDF (see ),
+ # sensor tags are valid elements of URDF (see ),
# but they are ignored by urdf_parser_py, that complains every time it sees one.
# As there is nothing to be fixed in the used models, and it is not useful
# to have a useless and noisy warning, let's remove before hands all the sensor elements,
# that anyhow are not parser by urdf_parser_py or adam
# See https://github.com/ami-iit/ADAM/issues/59
- xml_file = open(path, 'r')
+ xml_file = open(path, "r")
xml_string = xml_file.read()
xml_file.close()
xml_string_without_sensors_tags = urdf_remove_sensors_tags(xml_string)
- self.urdf_desc = urdf_parser_py.urdf.URDF.from_xml_string(xml_string_without_sensors_tags)
+ self.urdf_desc = urdf_parser_py.urdf.URDF.from_xml_string(
+ xml_string_without_sensors_tags
+ )
self.name = self.urdf_desc.name
def get_joints(self) -> List[StdJoint]:
| Black action is failing
The github action that verifies that the code is formatted following [black](https://github.com/psf/black) standards is failing.
See https://github.com/ami-iit/adam/actions/runs/7221531028
| Have you considered using the [`psf/black@stable`](https://github.com/psf/black/blob/main/action/main.py) action from the original `black` repo? | 2024-01-12T12:04:32 | 0.0 | [] | [] |
||
ymcui/Chinese-LLaMA-Alpaca | ymcui__Chinese-LLaMA-Alpaca-555 | 69045db949000b1c635b4b36b26e048f9d57580f | diff --git a/scripts/training/run_clm_pt_with_peft.py b/scripts/training/run_clm_pt_with_peft.py
index e8ad9f7..67ef2c6 100644
--- a/scripts/training/run_clm_pt_with_peft.py
+++ b/scripts/training/run_clm_pt_with_peft.py
@@ -57,6 +57,29 @@
from sklearn.metrics import accuracy_score
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
+from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
+
+
+class SavePeftModelCallback(transformers.TrainerCallback):
+ def save_model(self, args, state, kwargs):
+ if state.best_model_checkpoint is not None:
+ checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model")
+ else:
+ checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
+
+ peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
+ def on_save(self, args, state, control, **kwargs):
+ self.save_model(args, state, kwargs)
+ return control
+
+ def on_train_end(self, args, state, control, **kwargs):
+ peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
def accuracy(predictions, references, normalize=True, sample_weight=None):
return {
@@ -64,6 +87,8 @@ def accuracy(predictions, references, normalize=True, sample_weight=None):
accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
)
}
+
+
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
@@ -72,6 +97,7 @@ def compute_metrics(eval_preds):
preds = preds[:, :-1].reshape(-1)
return accuracy(predictions=preds, references=labels)
+
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
@@ -126,24 +152,6 @@ def fault_tolerance_data_collator(features: List) -> Dict[str, Any]:
return batch
-class GroupTextsBuilder:
- def __init__(self,max_seq_length):
- self.max_seq_length = max_seq_length
- def __call__(self, examples):
- # Concatenate all texts.
- firsts = {k:examples[k][0][0] for k in examples.keys()}
- lasts = {k:examples[k][0][-1] for k in examples.keys()}
- contents = {k:sum([vi[1:-1] for vi in v],[]) for k,v in examples.items()}
- total_length = len(contents[list(examples.keys())[0]])
-
- content_length = self.max_seq_length - 2
- if total_length >= content_length:
- total_length = (total_length // content_length ) * content_length
- # Split by chunks of max_len.
- result = {
- k: [ [firsts[k]] + t[i : i + content_length] + [lasts[k]] for i in range(0, total_length, content_length)] for k, t in contents.items()}
- return result
-
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@@ -297,6 +305,7 @@ def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
+
@dataclass
class MyTrainingArguments(TrainingArguments):
trainable : Optional[str] = field(default="q_proj,v_proj")
@@ -307,8 +316,10 @@ class MyTrainingArguments(TrainingArguments):
debug_mode : Optional[bool] = field(default=False)
peft_path : Optional[str] = field(default=None)
+
logger = logging.getLogger(__name__)
+
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
@@ -576,7 +587,7 @@ def group_texts(examples):
if training_args.do_eval and not is_torch_tpu_available()
else None,
)
-
+ trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
checkpoint = None
@@ -585,7 +596,6 @@ def group_texts(examples):
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
- trainer.save_model()
metrics = train_result.metrics
@@ -598,19 +608,6 @@ def group_texts(examples):
trainer.save_metrics("train", metrics)
trainer.save_state()
- import shutil
- from transformers.modeling_utils import unwrap_model
- lora_path=os.path.join(training_args.output_dir,'pt_lora_model')
- os.makedirs(lora_path, exist_ok=True)
- try:
- unwrap_model(model).peft_config.save_pretrained(lora_path)
- except AttributeError:
- unwrap_model(model).peft_config['default'].save_pretrained(lora_path)
- shutil.copyfile(
- os.path.join(training_args.output_dir,'pytorch_model.bin'),
- os.path.join(lora_path,'adapter_model.bin'))
- tokenizer.save_pretrained(lora_path)
-
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
@@ -629,7 +626,5 @@ def group_texts(examples):
trainer.save_metrics("eval", metrics)
-
-
if __name__ == "__main__":
main()
diff --git a/scripts/training/run_clm_sft_with_peft.py b/scripts/training/run_clm_sft_with_peft.py
index 276de40..21f108d 100644
--- a/scripts/training/run_clm_sft_with_peft.py
+++ b/scripts/training/run_clm_sft_with_peft.py
@@ -53,8 +53,7 @@
from transformers.utils.versions import require_version
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
-
-
+from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
IGNORE_INDEX = -100
@@ -69,6 +68,27 @@
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
+class SavePeftModelCallback(transformers.TrainerCallback):
+ def save_model(self, args, state, kwargs):
+ if state.best_model_checkpoint is not None:
+ checkpoint_folder = os.path.join(state.best_model_checkpoint, "sft_lora_model")
+ else:
+ checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
+
+ peft_model_path = os.path.join(checkpoint_folder, "sft_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
+ def on_save(self, args, state, control, **kwargs):
+ self.save_model(args, state, kwargs)
+ return control
+
+ def on_train_end(self, args, state, control, **kwargs):
+ peft_model_path = os.path.join(args.output_dir, "sft_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
+
@dataclass
class ModelArguments:
"""
@@ -182,6 +202,7 @@ class DataTrainingArguments:
max_seq_length: Optional[int] = field(default=512)
+
@dataclass
class MyTrainingArguments(TrainingArguments):
trainable : Optional[str] = field(default="q_proj,v_proj")
@@ -192,8 +213,10 @@ class MyTrainingArguments(TrainingArguments):
peft_path : Optional[str] = field(default=None)
force_resize_embeddings: bool = field(default=False)
+
logger = logging.getLogger(__name__)
+
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
@@ -248,7 +271,6 @@ def main():
# Set seed before initializing model.
set_seed(training_args.seed)
-
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
@@ -292,7 +314,6 @@ def main():
eval_dataset=None
train_dataset = None
-
if training_args.do_train:
with training_args.main_process_first(desc="loading and tokenization"):
path = Path(data_args.dataset_dir)
@@ -321,7 +342,6 @@ def main():
logger.info("eval example:")
logger.info(tokenizer.decode(eval_dataset[0]['input_ids']))
-
if model_args.model_name_or_path:
torch_dtype = (
model_args.torch_dtype
@@ -338,7 +358,6 @@ def main():
torch_dtype=torch_dtype,
low_cpu_mem_usage=True
)
-
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
@@ -381,7 +400,6 @@ def main():
lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
-
# Initialize our Trainer
trainer = Trainer(
model=model,
@@ -391,6 +409,7 @@ def main():
tokenizer=tokenizer,
data_collator=data_collator,
)
+ trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
@@ -400,7 +419,6 @@ def main():
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
- trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
@@ -410,19 +428,6 @@ def main():
trainer.save_metrics("train", metrics)
trainer.save_state()
- import shutil
- from transformers.modeling_utils import unwrap_model
- lora_path=os.path.join(training_args.output_dir,'sft_lora_model')
- os.makedirs(lora_path, exist_ok=True)
- try:
- unwrap_model(model).peft_config.save_pretrained(lora_path)
- except AttributeError:
- unwrap_model(model).peft_config['default'].save_pretrained(lora_path)
- shutil.copyfile(
- os.path.join(training_args.output_dir,'pytorch_model.bin'),
- os.path.join(lora_path,'adapter_model.bin'))
- tokenizer.save_pretrained(lora_path)
-
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
@@ -439,8 +444,6 @@ def main():
trainer.save_metrics("eval", metrics)
-
-
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
@@ -464,5 +467,6 @@ def smart_tokenizer_and_embedding_resize(
output_embeddings[-num_new_tokens:] = output_embeddings_avg
return num_new_tokens
+
if __name__ == "__main__":
main()
| After training, saving files fails with an error saying output/pytorch_model.bin does not exist
### Detailed description of the issue
After training finishes, an error is raised while saving files, saying that output/pytorch_model.bin does not exist, but the file actually does exist.
<img width="762" alt="image" src="https://github.com/ymcui/Chinese-LLaMA-Alpaca/assets/6229526/517ca191-bb60-4e10-bd20-66c3594fea4f">
Training command
```
wandb disabled
lr=2e-4
lora_rank=64
lora_alpha=128
lora_trainable="q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj"
modules_to_save="embed_tokens,lm_head"
lora_dropout=0.05
pretrained_model=/data/llama/7B
#pretrained_model=/data/output/chinese-llama-alpaca-7b-lora
chinese_tokenizer_path=/data/chinese-llama-plus-lora-7b
#chinese_tokenizer_path=/data/output/chinese-llama-alpaca-7b-lora
dataset_dir=/data/pt_data/
data_cache=/data/temp_data_cache_dir/
per_device_train_batch_size=1
per_device_eval_batch_size=1
training_steps=100
gradient_accumulation_steps=1
output_dir=/data/output/chinese-llama-lora-7b-0609-v1.1
deepspeed_config_file=ds_zero2_no_offload.json
torchrun --nnodes 1 --nproc_per_node 2 run_clm_pt_with_peft.py \
--deepspeed ${deepspeed_config_file} \
--model_name_or_path ${pretrained_model} \
--tokenizer_name_or_path ${chinese_tokenizer_path} \
--dataset_dir ${dataset_dir} \
--data_cache_dir ${data_cache} \
--validation_split_percentage 0.001 \
--per_device_train_batch_size ${per_device_train_batch_size} \
--per_device_eval_batch_size ${per_device_eval_batch_size} \
--do_train \
--seed $RANDOM \
--fp16 \
--max_steps ${training_steps} \
--lr_scheduler_type cosine \
--learning_rate ${lr} \
--warmup_ratio 0.05 \
--weight_decay 0.01 \
--logging_strategy steps \
--logging_steps 10 \
--save_strategy steps \
--save_total_limit 100 \
--save_steps 1200 \
--gradient_accumulation_steps ${gradient_accumulation_steps} \
--preprocessing_num_workers 8 \
--block_size 512 \
--output_dir ${output_dir} \
--overwrite_output_dir \
--ddp_timeout 30000 \
--logging_first_step True \
--lora_rank ${lora_rank} \
--lora_alpha ${lora_alpha} \
--trainable ${lora_trainable} \
--modules_to_save ${modules_to_save} \
--lora_dropout ${lora_dropout} \
--torch_dtype float16 \
--gradient_checkpointing \
--ddp_find_unused_parameters False
```
### Reference information
CPU: 64 cores
Memory: 256 GB
GPU: 2x A30
#### Dependencies (must be provided for code-related issues):
```
transformers 4.28.1
peft 0.3.0
torch 2.0.1
deepspeed 0.9.2
```
#### Runtime logs or screenshots
<img width="1257" alt="image" src="https://github.com/ymcui/Chinese-LLaMA-Alpaca/assets/6229526/2502bd86-76d8-4181-b9b9-933267e35969">
### Required checklist
*Put an x inside [ ] to tick an item. Delete this line before submitting. For the first three items, keep only the options that match your issue type.*
- [x] **Base model**: LLaMA-Plus 7B
- [x] **Operating system**: Linux
- [x] **Issue category**: model training and fine-tuning
- [x] **Model correctness check**: be sure to verify the model against [SHA256.md](https://github.com/ymcui/Chinese-LLaMA-Alpaca/blob/main/SHA256.md); with an incorrect model, correct results and normal operation cannot be guaranteed.
- [x] (Required) Since the related dependencies are updated frequently, please make sure you followed the steps in the [Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki)
- [x] (Required) I have read the [FAQ section](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/常见问题) and searched the existing issues, and found no similar problem or solution
- [ ] (Required) Third-party tool issues: e.g. [llama.cpp](https://github.com/ggerganov/llama.cpp), [text-generation-webui](https://github.com/oobabooga/text-generation-webui), [LlamaChat](https://github.com/alexrozanski/LlamaChat), etc.; it is also recommended to look for solutions in the corresponding projects
| Which version of deepspeed are you using?
>
deepspeed 0.9.2
I ran into this problem too. I'm using the zero3 strategy; the saved pytorch_model.bin is 13 GB and saving it is slow, so when the file-copy step runs, pytorch_model.bin has not been generated yet. I commented out the copy line and copied pytorch_model.bin manually instead.
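The merged patch addresses exactly this race by saving the small LoRA adapter from a `TrainerCallback` instead of copying the full pytorch_model.bin afterwards; a condensed sketch of that pattern (the surrounding `Trainer` setup is assumed):
```python
import os
import transformers

class SavePeftModelCallback(transformers.TrainerCallback):
    def on_train_end(self, args, state, control, **kwargs):
        # Write only the adapter weights; no copy of pytorch_model.bin is needed.
        peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
        kwargs["model"].save_pretrained(peft_model_path)
        kwargs["tokenizer"].save_pretrained(peft_model_path)

# trainer.add_callback(SavePeftModelCallback)  # assumes an existing Trainer instance
```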
I ran into this as well; it appears to be a bug in the new version. After modifying the script run_clm_sft_with_peft.py, it ran fine.

@ymcui @airaria It would be good if the maintainers could fix this | 2023-06-10T02:33:39 | 0.0 | [] | []
||
griffithlab/VAtools | griffithlab__VAtools-50 | 9cc919797e5c8a58b264bdb4257bac853e31c882 | diff --git a/vatools/ref_transcript_mismatch_reporter.py b/vatools/ref_transcript_mismatch_reporter.py
index 5fcfe10..311e5a5 100644
--- a/vatools/ref_transcript_mismatch_reporter.py
+++ b/vatools/ref_transcript_mismatch_reporter.py
@@ -16,6 +16,8 @@ def resolve_consequence(consequence_string):
if 'start_lost' in consequences:
consequence = None
+ elif 'stop_retained_variant' in consequences:
+ consequence = None
elif 'frameshift_variant' in consequences:
consequence = 'FS'
elif 'missense_variant' in consequences:
@@ -137,10 +139,18 @@ def main(args_input = sys.argv[1:]):
wildtype_amino_acid = wildtype_amino_acid.split('X')[0]
if key == 'Protein_position':
protein_position = value
+ if '/' in value:
+ protein_position = value.split('/')[0]
+ if protein_position == '-':
+ protein_position = value.split('/')[1]
if key == 'Consequence':
variant_type = resolve_consequence(value)
if key == 'Feature':
transcript = value
+
+ if '*' in full_wildtype_sequence:
+ continue
+
if variant_type == 'missense' or variant_type == 'inframe_ins':
if '-' in protein_position:
position = int(protein_position.split('-', 1)[0]) - 1
@@ -153,6 +163,9 @@ def main(args_input = sys.argv[1:]):
else:
continue
+ if position == '-':
+ continue
+
if wildtype_amino_acid != '-':
processable_transcript_count += 1
processable_variant = True
| ref_transcript_mismatch_reporter: `ValueError: invalid literal for int() with base 10: '71/98'`
I get the following error when running vatools
```
Traceback (most recent call last):
File "/home/el/miniconda3/envs/vatools/bin/ref-transcript-mismatch-reporter", line 8, in <module>
sys.exit(main())
File "/home/el/miniconda3/envs/vatools/lib/python3.8/site-packages/vatools/ref_transcript_mismatch_reporter.py", line 148, in main
position = int(protein_position) - 1
ValueError: invalid literal for int() with base 10: '71/98'
```
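For context, a short sketch of why the raw field trips up `int()` and how parsing along the lines of the patch resolves it (the helper name is illustrative): VEP can report `Protein_position` as `position/length`, e.g. `71/98`, or as `-/98` for some consequence types.
```python
def parse_protein_position(value):
    # "71/98" -> "71"; "-/98" -> "98"; a plain "71" passes through unchanged
    if "/" in value:
        pos, length = value.split("/", 1)
        value = length if pos == "-" else pos
    return value

print(parse_protein_position("71/98"))  # 71
```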
I ran `pip show vatools` to make sure it works and got the following output:
```
Name: vatools
Version: 5.0.0
Summary: A tool for annotating VCF files with expression and readcount data
Home-page: https://github.com/griffithlab/vatools
Author: Susanna Kiwala, Chris Miller
Author-email: [email protected]
License: MIT License
Location: /home/el/miniconda3/envs/vatools/lib/python3.8/site-packages
Requires: vcfpy, pysam, gtfparse, testfixtures, pandas
Required-by:
```
Could these issues be related?
regards
El
_Originally posted by @iichelhadi in https://github.com/griffithlab/pVACtools/issues/692#issuecomment-902470748_
| 2021-08-25T19:33:58 | 0.0 | [] | [] |
|||
Hyundai-Kia-Connect/hyundai_kia_connect_api | Hyundai-Kia-Connect__hyundai_kia_connect_api-201 | 0a8c16cd348fe12e45c81f1aec9f9885b3b581b8 | diff --git a/README.rst b/README.rst
index f9f718f4..efb9b429 100644
--- a/README.rst
+++ b/README.rst
@@ -15,7 +15,7 @@ Python 3.9 or newer is required to use this package. Vehicle manager is the key
brand: int,
username: str
password: str
- pin: str (required for CA, and potentially USA, otherwise pass a blank string)
+ pin: str (required for CA, and potentially USA, otherwise pass a blank string)
Key values for the int exist in the constant(https://github.com/fuatakgun/hyundai_kia_connect_api/blob/master/hyundai_kia_connect_api/const.py) file as::
@@ -60,3 +60,28 @@ If geolocation is required you can also allow this by running::
This will populate the address of the vehicle in the vehicle instance.
+The Bluelink App is reset to English for users who have set another language in the Bluelink App in Europe when using hyundai_kia_connect_api.
+To avoid this, you can pass the optional parameter language (default is "en") to the constructor of VehicleManager, e.g. for Dutch:
+ vm = VehicleManager(region=2, brand=1, username="[email protected]", password="password", pin="1234", language="nl")
+
+Note: this is only implemented for Europe currently.
+[For a list of language codes, see here.](https://www.science.co.il/language/Codes.php). Currently in Europe the Bluelink App shows the following languages:
+- "en" English
+- "de" German
+- "fr" French
+- "it" Italian
+- "es" Spanish
+- "sv" Swedish
+- "nl" Dutch
+- "no" Norwegian
+- "cs" Czech
+- "sk" Slovak
+- "hu" Hungarian
+- "da" Danish
+- "pl" Polish
+- "fi" Finnish
+- "pt" Portuguese
+
+
+
+
diff --git a/hyundai_kia_connect_api/HyundaiBlueLinkAPIUSA.py b/hyundai_kia_connect_api/HyundaiBlueLinkAPIUSA.py
index f2016f30..8274a3ef 100644
--- a/hyundai_kia_connect_api/HyundaiBlueLinkAPIUSA.py
+++ b/hyundai_kia_connect_api/HyundaiBlueLinkAPIUSA.py
@@ -44,7 +44,9 @@ def __init__(
self,
region: int,
brand: int,
+ language: str
):
+ self.LANGUAGE: str = language
self.BASE_URL: str = "api.telematics.hyundaiusa.com"
self.LOGIN_API: str = "https://" + self.BASE_URL + "/v2/ac/"
self.API_URL: str = "https://" + self.BASE_URL + "/ac/v2/"
@@ -448,7 +450,7 @@ def start_climate(
headers["username"] = token.username
headers["blueLinkServicePin"] = token.pin
_LOGGER.debug(f"{DOMAIN} - Start engine headers: {headers}")
-
+
if options.climate is None:
options.climate = True
if options.set_temp is None:
@@ -459,8 +461,8 @@ def start_climate(
options.heating = 0
if options.defrost is None:
options.defrost = False
-
-
+
+
data = {
"Ims": 0,
"airCtrl": int(options.climate),
diff --git a/hyundai_kia_connect_api/KiaUvoAPIUSA.py b/hyundai_kia_connect_api/KiaUvoAPIUSA.py
index 08309c71..f11146fd 100644
--- a/hyundai_kia_connect_api/KiaUvoAPIUSA.py
+++ b/hyundai_kia_connect_api/KiaUvoAPIUSA.py
@@ -84,7 +84,9 @@ def __init__(
self,
region: int,
brand: int,
+ language
) -> None:
+ self.LANGUAGE: str = language
self.temperature_range = range(62, 82)
# Randomly generate a plausible device id on startup
@@ -219,16 +221,16 @@ def refresh_vehicles(self, token: Token, vehicles: list[Vehicle]) -> None:
def update_vehicle_with_cached_state(self, token: Token, vehicle: Vehicle) -> None:
state = self._get_cached_vehicle_state(token, vehicle)
- self._update_vehicle_properties(vehicle, state)
-
+ self._update_vehicle_properties(vehicle, state)
+
def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
self._get_forced_vehicle_state(token, vehicle)
#TODO: Force update needs work to return the correct data for processing
#self._update_vehicle_properties(vehicle, state)
#Temp call a cached state since we are removing this from parent logic in other commits should be removed when the above is fixed
self.update_vehicle_with_cached_state(token, vehicle)
-
- def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
+
+ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
"""Get cached vehicle data and update Vehicle instance with it"""
vehicle.last_updated_at = self.get_last_updated_at(
get_child_value(state, "vehicleStatus.syncDate.utc")
@@ -356,7 +358,7 @@ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
get_child_value(state, "nextService.value"),
DISTANCE_UNITS[get_child_value(state, "nextService.unit")],
)
-
+
vehicle.data = state
def get_last_updated_at(self, value) -> dt.datetime:
@@ -470,7 +472,7 @@ def _get_forced_vehicle_state(self, token: Token, vehicle: Vehicle) -> dict:
token=token, url=url, json_body=body, vehicle=vehicle
)
response_body = response.json()
-
+
def check_last_action_status(self, token: Token, vehicle: Vehicle, action_id: str):
url = self.API_URL + "cmm/gts"
diff --git a/hyundai_kia_connect_api/KiaUvoApiCA.py b/hyundai_kia_connect_api/KiaUvoApiCA.py
index 38159ae8..6624e98c 100644
--- a/hyundai_kia_connect_api/KiaUvoApiCA.py
+++ b/hyundai_kia_connect_api/KiaUvoApiCA.py
@@ -37,8 +37,8 @@ class KiaUvoApiCA(ApiImpl):
temperature_range_c_new = [x * 0.5 for x in range(28, 64)]
temperature_range_model_year = 2020
- def __init__(self, region: int, brand: int) -> None:
-
+ def __init__(self, region: int, brand: int, language: str) -> None:
+ self.LANGUAGE: str = language
if BRANDS[brand] == BRAND_KIA:
self.BASE_URL: str = "www.kiaconnect.ca"
elif BRANDS[brand] == BRAND_HYUNDAI:
@@ -102,7 +102,7 @@ def get_vehicles(self, token: Token) -> list[Vehicle]:
entry_engine_type = ENGINE_TYPES.ICE
elif(entry["fuelKindCode"] == "E"):
entry_engine_type = ENGINE_TYPES.EV
- elif(entry["fuelKindCode"] == "P"):
+ elif(entry["fuelKindCode"] == "P"):
entry_engine_type = ENGINE_TYPES.PHEV
vehicle: Vehicle = Vehicle(
id=entry["vehicleId"],
@@ -119,10 +119,10 @@ def get_vehicles(self, token: Token) -> list[Vehicle]:
def update_vehicle_with_cached_state(self, token: Token, vehicle: Vehicle) -> None:
state = self._get_cached_vehicle_state(token, vehicle)
self._update_vehicle_properties_base(vehicle, state)
-
- # Service Status Call
+
+ # Service Status Call
service = self._get_next_service(token, vehicle)
-
+
#Get location if the car has moved since last call
if vehicle.odometer:
if vehicle.odometer < get_child_value(service, "currentOdometer"):
@@ -131,14 +131,14 @@ def update_vehicle_with_cached_state(self, token: Token, vehicle: Vehicle) -> No
else:
location = self.get_location(token, vehicle)
self._update_vehicle_properties_location(vehicle, location)
-
- #Update service after the fact so we still have the old odometer reading available for above.
+
+ #Update service after the fact so we still have the old odometer reading available for above.
self._update_vehicle_properties_service(vehicle, service)
if vehicle.engine_type == ENGINE_TYPES.EV:
charge = self._get_charge_limits(token, vehicle)
self._update_vehicle_properties_charge(vehicle, charge)
-
-
+
+
def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
state = self._get_forced_vehicle_state(token, vehicle)
@@ -159,11 +159,11 @@ def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
f"{DOMAIN} - Set vehicle.timezone to UTC + {offset} hours"
)
- self._update_vehicle_properties_base(vehicle, state)
+ self._update_vehicle_properties_base(vehicle, state)
- # Service Status Call
+ # Service Status Call
service = self._get_next_service(token, vehicle)
-
+
#Get location if the car has moved since last call
if vehicle.odometer:
if vehicle.odometer < get_child_value(service, "currentOdometer"):
@@ -172,16 +172,16 @@ def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
else:
location = self.get_location(token, vehicle)
self._update_vehicle_properties_location(vehicle, location)
-
- #Update service after the fact so we still have the old odometer reading available for above.
+
+ #Update service after the fact so we still have the old odometer reading available for above.
self._update_vehicle_properties_service(vehicle, service)
-
+
if vehicle.engine_type == ENGINE_TYPES.EV:
charge = self._get_charge_limits(token, vehicle)
self._update_vehicle_properties_charge(vehicle, charge)
-
-
- def _update_vehicle_properties_base(self, vehicle: Vehicle, state: dict) -> None:
+
+
+ def _update_vehicle_properties_base(self, vehicle: Vehicle, state: dict) -> None:
_LOGGER.debug(f"{DOMAIN} - Old Vehicle Last Updated: {vehicle.last_updated_at}")
vehicle.last_updated_at = self.get_last_updated_at(
get_child_value(state, "status.lastStatusDate"),
@@ -196,7 +196,7 @@ def _update_vehicle_properties_base(self, vehicle: Vehicle, state: dict) -> None
else:
state["status"]["airTemp"]["value"] = self.temperature_range_c_old[tempIndex]
-
+
vehicle.total_driving_range = (
get_child_value(
state,
@@ -303,9 +303,9 @@ def _update_vehicle_properties_base(self, vehicle: Vehicle, state: dict) -> None
if vehicle.data is None:
vehicle.data = {}
vehicle.data["status"] = state["status"]
-
+
def _update_vehicle_properties_service(self, vehicle: Vehicle, state: dict) -> None:
-
+
vehicle.odometer = (
get_child_value(state, "currentOdometer"),
DISTANCE_UNITS[get_child_value(state, "currentOdometerUnit")],
@@ -318,20 +318,20 @@ def _update_vehicle_properties_service(self, vehicle: Vehicle, state: dict) -> N
get_child_value(state, "msopServiceOdometer"),
DISTANCE_UNITS[get_child_value(state, "msopServiceOdometerUnit")],
)
-
+
vehicle.data["service"] = state
-
+
def _update_vehicle_properties_location(self, vehicle: Vehicle, state: dict) -> None:
-
+
if get_child_value(state, "coord.lat"):
vehicle.location = (
get_child_value(state, "coord.lat"),
get_child_value(state, "coord.lon"),
get_child_value(state, "time"),
- )
+ )
vehicle.data["vehicleLocation"] = state
-
+
def get_last_updated_at(self, value, vehicle) -> dt.datetime:
m = re.match(r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})", value)
@@ -365,7 +365,7 @@ def _get_cached_vehicle_state(self, token: Token, vehicle: Vehicle) -> dict:
status["status"] = response
return status
-
+
def _get_forced_vehicle_state(self, token: Token, vehicle: Vehicle) -> dict:
url = self.API_URL + "rltmvhclsts"
headers = self.API_HEADERS
@@ -379,7 +379,7 @@ def _get_forced_vehicle_state(self, token: Token, vehicle: Vehicle) -> dict:
response = response["result"]["status"]
status = {}
status["status"] = response
-
+
return status
def _get_next_service(self, token: Token, vehicle: Vehicle) -> dict:
@@ -454,7 +454,7 @@ def start_climate(
) -> str:
if vehicle.engine_type == ENGINE_TYPES.EV:
url = self.API_URL + "evc/rfon"
- else:
+ else:
url = self.API_URL + "rmtstrt"
headers = self.API_HEADERS
headers["accessToken"] = token.access_token
@@ -476,10 +476,10 @@ def start_climate(
if options.front_right_seat is None:
options.front_right_seat = 0
if options.rear_left_seat is None:
- options.rear_left_seat = 0
+ options.rear_left_seat = 0
if options.rear_right_seat is None:
options.rear_right_seat = 0
-
+
if vehicle.year >= self.temperature_range_model_year:
hex_set_temp = get_index_into_hex_temp(
self.temperature_range_c_new.index(options.set_temp)
@@ -521,14 +521,14 @@ def start_climate(
response = requests.post(url, headers=headers, data=json.dumps(payload))
response_headers = response.headers
response = response.json()
-
+
_LOGGER.debug(f"{DOMAIN} - Received start_climate response {response}")
return response_headers["transactionId"]
def stop_climate(self, token: Token, vehicle: Vehicle) -> str:
if vehicle.engine_type == ENGINE_TYPES.EV:
url = self.API_URL + "evc/rfoff"
- else:
+ else:
url = self.API_URL + "rmtstp"
headers = self.API_HEADERS
headers["accessToken"] = token.access_token
@@ -595,22 +595,22 @@ def stop_charge(self, token: Token, vehicle: Vehicle) -> str:
_LOGGER.debug(f"{DOMAIN} - Received stop_charge response {response}")
return response_headers["transactionId"]
-
- def _update_vehicle_properties_charge(self, vehicle: Vehicle, state: dict) -> None:
+
+ def _update_vehicle_properties_charge(self, vehicle: Vehicle, state: dict) -> None:
vehicle.ev_charge_limits_ac = [x['level'] for x in state if x['plugType'] == 1][-1]
vehicle.ev_charge_limits_dc = [x['level'] for x in state if x['plugType'] == 0][-1]
-
+
def _get_charge_limits(self, token: Token, vehicle: Vehicle) -> dict:
url = self.API_URL + "evc/selsoc"
headers = self.API_HEADERS
headers["accessToken"] = token.access_token
headers["vehicleId"] = vehicle.id
-
- response = requests.post(url, headers=headers)
+
+ response = requests.post(url, headers=headers)
response = response.json()
return response["result"]
-
+
def set_charge_limits(self, token: Token, vehicle: Vehicle, ac: int, dc: int)-> str:
url = self.API_URL + "evc/setsoc"
headers = self.API_HEADERS
@@ -625,7 +625,7 @@ def set_charge_limits(self, token: Token, vehicle: Vehicle, ac: int, dc: int)->
},
{
"plugType": 1,
- "level": ac,
+ "level": ac,
}],
"pin": token.pin,
}
diff --git a/hyundai_kia_connect_api/KiaUvoApiEU.py b/hyundai_kia_connect_api/KiaUvoApiEU.py
index 70ab8521..9ecc9315 100644
--- a/hyundai_kia_connect_api/KiaUvoApiEU.py
+++ b/hyundai_kia_connect_api/KiaUvoApiEU.py
@@ -39,6 +39,24 @@
USER_AGENT_MOZILLA: str = "Mozilla/5.0 (Linux; Android 4.1.1; Galaxy Nexus Build/JRO03C) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19"
ACCEPT_HEADER_ALL: str = "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
+SUPPORTED_LANGUAGES_LIST = [
+ "en", # English
+ "de", # German
+ "fr", # French
+ "it", # Italian
+ "es", # Spanish
+ "sv", # Swedish
+ "nl", # Dutch
+ "no", # Norwegian
+ "cs", # Czech
+ "sk", # Slovak
+ "hu", # Hungarian
+ "da", # Danish
+ "pl", # Polish
+ "fi", # Finnish
+ "pt" # Portuguese
+]
+
def _check_response_for_errors(response: dict) -> None:
"""
@@ -77,9 +95,14 @@ class KiaUvoApiEU(ApiImpl):
data_timezone = tz.gettz("Europe/Berlin")
temperature_range = [x * 0.5 for x in range(28, 60)]
- def __init__(self, region: int, brand: int) -> None:
+ def __init__(self, region: int, brand: int, language: str) -> None:
self.stamps = None
+ if language not in SUPPORTED_LANGUAGES_LIST:
+ _LOGGER.warning(f"Unsupported language: {language}, fallback to en")
+ language = "en" # fallback to English
+ self.LANGUAGE: str = language
+
if BRANDS[brand] == BRAND_KIA:
self.BASE_DOMAIN: str = "prd.eu-ccapi.kia.com"
self.CCSP_SERVICE_ID: str = "fdc85c00-0a2f-4c64-bcb4-2cfb1500730a"
@@ -112,7 +135,7 @@ def __init__(self, region: int, brand: int) -> None:
+ auth_client_id
+ "&scope=openid%20profile%20email%20phone&response_type=code&hkid_session_reset=true&redirect_uri="
+ self.USER_API_URL
- + "integration/redirect/login&ui_locales=en&state=$service_id:$user_id"
+ + "integration/redirect/login&ui_locales=" + self.LANGUAGE + "&state=$service_id:$user_id"
)
elif BRANDS[brand] == BRAND_HYUNDAI:
auth_client_id = "64621b96-0f0d-11ec-82a8-0242ac130003"
@@ -123,7 +146,7 @@ def __init__(self, region: int, brand: int) -> None:
+ auth_client_id
+ "&scope=openid%20profile%20email%20phone&response_type=code&hkid_session_reset=true&redirect_uri="
+ self.USER_API_URL
- + "integration/redirect/login&ui_locales=en&state=$service_id:$user_id"
+ + "integration/redirect/login&ui_locales=" + self.LANGUAGE + "&state=$service_id:$user_id"
)
self.stamps_url: str = (
@@ -197,9 +220,9 @@ def get_vehicles(self, token: Token) -> list[Vehicle]:
entry_engine_type = ENGINE_TYPES.ICE
elif(entry["type"] == "EV"):
entry_engine_type = ENGINE_TYPES.EV
- elif(entry["type"] == "PHEV"):
+ elif(entry["type"] == "PHEV"):
entry_engine_type = ENGINE_TYPES.PHEV
- elif(entry["type"] == "HV"):
+ elif(entry["type"] == "HV"):
entry_engine_type = ENGINE_TYPES.HEV
vehicle: Vehicle = Vehicle(
id=entry["vehicleId"],
@@ -231,7 +254,7 @@ def get_last_updated_at(self, value) -> dt.datetime:
def update_vehicle_with_cached_state(self, token: Token, vehicle: Vehicle) -> None:
state = self._get_cached_vehicle_state(token, vehicle)
self._update_vehicle_properties(vehicle, state)
-
+
if vehicle.engine_type == ENGINE_TYPES.EV:
try:
state = self._get_driving_info(token, vehicle)
@@ -251,7 +274,7 @@ def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
state = self._get_forced_vehicle_state(token, vehicle)
state["vehicleLocation"] = self._get_location(token, vehicle)
self._update_vehicle_properties(vehicle, state)
- #Only call for driving info on cars we know have a chance of supporting it. Could be expanded if other types do support it.
+ #Only call for driving info on cars we know have a chance of supporting it. Could be expanded if other types do support it.
if vehicle.engine_type == ENGINE_TYPES.EV:
try:
state = self._get_driving_info(token, vehicle)
@@ -273,7 +296,7 @@ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
)
else:
vehicle.last_updated_at = dt.datetime.now(self.data_timezone)
-
+
vehicle.total_driving_range = (
get_child_value(
state,
@@ -286,8 +309,8 @@ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
)
],
)
-
- #Only update odometer if present. It isn't present in a force update. Dec 2022 update also reports 0 when the car is off. This tries to remediate best we can. Can be removed once fixed in the cars firmware.
+
+ #Only update odometer if present. It isn't present in a force update. Dec 2022 update also reports 0 when the car is off. This tries to remediate best we can. Can be removed once fixed in the cars firmware.
if get_child_value(state, "odometer.value") is not None:
if get_child_value(state, "odometer.value") != 0:
vehicle.odometer = (
@@ -335,7 +358,7 @@ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
vehicle.steering_wheel_heater_is_on = False
elif steer_wheel_heat == 1:
vehicle.steering_wheel_heater_is_on = True
-
+
vehicle.back_window_heater_is_on = get_child_value(
state, "vehicleStatus.sideBackWindowHeat"
)
@@ -396,16 +419,16 @@ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
vehicle.ev_battery_is_plugged_in = get_child_value(
state, "vehicleStatus.evStatus.batteryPlugin"
)
-
+
ev_charge_port_door_is_open = get_child_value(
state, "vehicleStatus.evStatus.chargePortDoorOpenStatus"
)
-
- if ev_charge_port_door_is_open == 1:
+
+ if ev_charge_port_door_is_open == 1:
vehicle.ev_charge_port_door_is_open = True
elif ev_charge_port_door_is_open == 2:
- vehicle.ev_charge_port_door_is_open = False
-
+ vehicle.ev_charge_port_door_is_open = False
+
vehicle.ev_driving_range = (
get_child_value(
state,
@@ -469,7 +492,7 @@ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
),
DISTANCE_UNITS[get_child_value(state, "vehicleStatus.dte.unit")],
)
-
+
vehicle.ev_target_range_charge_AC = (
get_child_value(
state,
@@ -495,12 +518,12 @@ def _update_vehicle_properties(self, vehicle: Vehicle, state: dict) -> None:
],
)
- vehicle.washer_fluid_warning_is_on = get_child_value(state, "vehicleStatus.washerFluidStatus")
+ vehicle.washer_fluid_warning_is_on = get_child_value(state, "vehicleStatus.washerFluidStatus")
vehicle.fuel_level = get_child_value(state, "vehicleStatus.fuelLevel")
vehicle.fuel_level_is_low = get_child_value(state, "vehicleStatus.lowFuelLight")
vehicle.air_control_is_on = get_child_value(state, "vehicleStatus.airCtrlOn")
vehicle.smart_key_battery_warning_is_on = get_child_value(state, "vehicleStatus.smartKeyBatteryWarning")
-
+
if get_child_value(state, "vehicleLocation.coord.lat"):
vehicle.location = (
@@ -638,7 +661,7 @@ def stop_charge(self, token: Token, vehicle: Vehicle) -> None:
_check_response_for_errors(response)
def _get_charge_limits(self, token: Token, vehicle: Vehicle) -> dict:
- #Not currently used as value is in the general get. Most likely this forces the car the update it.
+ #Not currently used as value is in the general get. Most likely this forces the car the update it.
url = f"{self.SPA_API_URL}vehicles/{vehicle.id}/charge/target"
_LOGGER.debug(f"{DOMAIN} - Get Charging Limits Request")
@@ -766,7 +789,7 @@ def _get_cookies(self) -> dict:
+ self.CLIENT_ID
+ "&redirect_uri="
+ self.USER_API_URL
- + "oauth2/redirect&lang=en"
+ + "oauth2/redirect&lang=" + self.LANGUAGE
)
payload = {}
headers = {
@@ -781,7 +804,7 @@ def _get_cookies(self) -> dict:
"Sec-Fetch-User": "?1",
"Sec-Fetch-Dest": "document",
"Accept-Encoding": "gzip, deflate",
- "Accept-Language": "en,en-US;q=0.9",
+ "Accept-Language": "en,en-US," + self.LANGUAGE + ";q=0.9",
}
_LOGGER.debug(f"{DOMAIN} - Get cookies request: {url}")
@@ -795,7 +818,7 @@ def _set_session_language(self, cookies) -> None:
### Set Language for Session ###
url = self.USER_API_URL + "language"
headers = {"Content-type": "application/json"}
- payload = {"lang": "en"}
+ payload = {"lang": self.LANGUAGE}
response = requests.post(url, json=payload, headers=headers, cookies=cookies)
def _get_authorization_code_with_redirect_url(
diff --git a/hyundai_kia_connect_api/VehicleManager.py b/hyundai_kia_connect_api/VehicleManager.py
index a3246cda..61e7d125 100644
--- a/hyundai_kia_connect_api/VehicleManager.py
+++ b/hyundai_kia_connect_api/VehicleManager.py
@@ -29,7 +29,7 @@
class VehicleManager:
- def __init__(self, region: int, brand: int, username: str, password: str, pin: str, geocode_api_enable: bool = False, geocode_api_use_email: bool = False):
+ def __init__(self, region: int, brand: int, username: str, password: str, pin: str, geocode_api_enable: bool = False, geocode_api_use_email: bool = False, language: str = "en"):
self.region: int = region
self.brand: int = brand
self.username: str = username
@@ -37,9 +37,10 @@ def __init__(self, region: int, brand: int, username: str, password: str, pin: s
self.geocode_api_enable: bool = geocode_api_enable
self.geocode_api_use_email: bool = geocode_api_use_email
self.pin: str = pin
+ self.language: str = language
self.api: ApiImpl = self.get_implementation_by_region_brand(
- self.region, self.brand
+ self.region, self.brand, self.language
)
self.token: Token = None
@@ -129,12 +130,12 @@ def close_charge_port(self, vehicle_id: str) -> str:
return self.api.charge_port_action(self.token, self.get_vehicle(vehicle_id), CHARGE_PORT_ACTION.CLOSE)
@staticmethod
- def get_implementation_by_region_brand(region: int, brand: int) -> ApiImpl:
+ def get_implementation_by_region_brand(region: int, brand: int, language: str) -> ApiImpl:
if REGIONS[region] == REGION_CANADA:
- return KiaUvoApiCA(region, brand)
+ return KiaUvoApiCA(region, brand, language)
elif REGIONS[region] == REGION_EUROPE:
- return KiaUvoApiEU(region, brand)
+ return KiaUvoApiEU(region, brand, language)
elif REGIONS[region] == REGION_USA and BRANDS[brand] == BRAND_HYUNDAI:
- return HyundaiBlueLinkAPIUSA(region, brand)
+ return HyundaiBlueLinkAPIUSA(region, brand, language)
elif REGIONS[region] == REGION_USA and BRANDS[brand] == BRAND_KIA:
- return KiaUvoAPIUSA(region, brand)
+ return KiaUvoAPIUSA(region, brand, language)
| Android Bluelink App language is reset to English when using hyundai_kia_connect_api
* Hyundai / Kia Connect version: v1.40.11
* Python version: 3.9.13
* Operating System: Windows 10 and Raspberry Pi 11 (bullseye)
* Android Bluelink App: 2.0.7
### Description
When the Android Bluelink App language is configured to a non-English language, using hyundai_kia_connect_api resets the Android Bluelink App to English. So somehow the authentication/login/API resets the Android Bluelink App language to English.
### What I Did
I've looked at the source code of hyundai_kia_connect_api and I think it is a side effect of the following Python statement in KiaUvoApiEU.py, line 68:
```
if BRANDS[brand] == BRAND_KIA:
auth_client_id = "f4d531c7-1043-444d-b09a-ad24bd913dd4"
self.LOGIN_FORM_URL: str = (
"https://"
+ self.LOGIN_FORM_HOST
+ "/auth/realms/eukiaidm/protocol/openid-connect/auth?client_id="
+ auth_client_id
+ "&scope=openid%20profile%20email%20phone&response_type=code&hkid_session_reset=true&redirect_uri="
+ self.USER_API_URL
+ "integration/redirect/login&ui_locales=en&state=$service_id:$user_id"
)
elif BRANDS[brand] == BRAND_HYUNDAI:
auth_client_id = "64621b96-0f0d-11ec-82a8-0242ac130003"
self.LOGIN_FORM_URL: str = (
"https://"
+ self.LOGIN_FORM_HOST
+ "/auth/realms/euhyundaiidm/protocol/openid-connect/auth?client_id="
+ auth_client_id
+ "&scope=openid%20profile%20email%20phone&response_type=code&hkid_session_reset=true&redirect_uri="
+ self.USER_API_URL
+ "integration/redirect/login&ui_locales=en&state=$service_id:$user_id"
)
```
The statement "ui_locales=en" could cause the side effect? Should it be "ui_locales=de" for Germany and "ui_locales=nl" for the Netherlands? But changing that, still the language setting in the Android Bluelink App is reset to English.
| Could you try editing this and seeing if it helps? https://github.com/Hyundai-Kia-Connect/hyundai_kia_connect_api/blob/9a3beacd620044e8a5cd0d5d52ceb22b1f338e9b/hyundai_kia_connect_api/KiaUvoApiEU.py#L626
This line also references language:
https://github.com/Hyundai-Kia-Connect/hyundai_kia_connect_api/blob/9a3beacd620044e8a5cd0d5d52ceb22b1f338e9b/hyundai_kia_connect_api/KiaUvoApiEU.py#L597
We have had this reported on kia_uvo too but I haven't spent time testing since I don't have a car in EU. Makes it hard to test.
For reference, someone else had this too. Not something I could focus on at the time, but if you can test out and implement the languages the app supports, I can extend it into kia_uvo: https://github.com/fuatakgun/kia_uvo/issues/339
I tried it out: when changing all of the above from en to nl, the BlueLink App kept the language set to Dutch.
Awesome. I assume the app has a few language options. I am thinking we make this an optional parameter for the vehicle manager init? If not passed, we just take a default of en. I find it odd this isn't just set and stored on the account, but this is the way.
Ok, I have tried to make the changes, see the attached patch file.
But I was not sure if the way I included this in KiaUvoApiEU.py is the correct way to do this, namely:
```
LANGUAGE = VehicleManager.language
```
I also put the patch file in text here below,
```
Left base folder: C:\Users\Rick\git\monitor\hyundai_kia_connect_api-1.40.11
Right base folder: C:\Users\Rick\git\monitor\hyundai_kia_connect_api
diff -r KiaUvoApiEU.py KiaUvoApiEU.py
7a8
> import VehicleManager
38a40
> LANGUAGE = VehicleManager.language
77c79
< + "integration/redirect/login&ui_locales=en&state=$service_id:$user_id"
---
> + "integration/redirect/login&ui_locales=" + LANGUAGE + "&state=$service_id:$user_id"
88c90
< + "integration/redirect/login&ui_locales=en&state=$service_id:$user_id"
---
> + "integration/redirect/login&ui_locales=" + LANGUAGE + "&state=$service_id:$user_id"
668c670
< + "oauth2/redirect&lang=en"
---
> + "oauth2/redirect&lang=" + LANGUAGE
697c699
< payload = {"lang": "en"}
---
> payload = {"lang": LANGUAGE}
diff -r VehicleManager.py VehicleManager.py
31c31
< def __init__(self, region: int, brand: int, username: str, password: str, pin: str, geocode_api_enable: bool = False, geocode_api_use_email: bool = False):
---
> def __init__(self, region: int, brand: int, username: str, password: str, pin: str, geocode_api_enable: bool = False, geocode_api_use_email: bool = False, language: str = "en"):
38a39
> self.language: str = language
```
[Issue145.patch.txt](https://github.com/Hyundai-Kia-Connect/hyundai_kia_connect_api/files/10087198/Issue145.patch.txt)
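A note on the open question above: `LANGUAGE = VehicleManager.language` at module level would not work, because `language` only exists on `VehicleManager` instances (it is set in `__init__`), and importing `VehicleManager` inside `KiaUvoApiEU` would likely create a circular import, since `VehicleManager` itself imports and constructs the API classes. The merged change instead threads the value through the constructors; a minimal sketch of that pattern with simplified, hypothetical class bodies (not the actual library code):

```python
class ApiImpl:
    def __init__(self, region: int, brand: int, language: str) -> None:
        # store a per-instance language instead of a module-level constant
        self.LANGUAGE = language
        self.login_form_url = (
            "https://example.invalid/auth?ui_locales=" + self.LANGUAGE
        )


class VehicleManager:
    def __init__(self, region: int, brand: int, language: str = "en") -> None:
        self.language = language
        # the manager passes the language down when it builds the API client
        self.api = ApiImpl(region, brand, self.language)


vm = VehicleManager(region=1, brand=1, language="nl")
print(vm.api.login_form_url)  # ...ui_locales=nl
```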
Two thoughts: one would be to look at using enums in the constants to define the valid options, like this. I assume your app has a few languages.
https://github.com/Hyundai-Kia-Connect/hyundai_kia_connect_api/blob/e9880a0182ababcbeeeb735ac437066fcf493e3c/hyundai_kia_connect_api/const.py#L43-L46
It is also worth setting a default of EN in case it isn't set; this makes it a non-breaking change. Finally, if you know the list, maybe put it in a comment somewhere so we know what the EU region supports, as I bet NA will differ.
Actually I can select 15 languages, so an Enum is not so flexible. Just passing [the locale as a string for the wanted language](https://www.science.co.il/language/Locale-codes.php) is more flexible. In the patch I added in the previous post, the default language is "en", so you just describe the possible language abbreviations in the README or as a link to a page describing them.
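With that change merged, the caller picks the language once when constructing the manager. A hedged usage sketch (credentials are placeholders, and it assumes `VehicleManager` is importable from the package root as in the project's examples):

```python
from hyundai_kia_connect_api import VehicleManager

# language defaults to "en"; any locale code the app supports
# (e.g. "nl", "de", "fr") can be passed instead
vm = VehicleManager(
    region=1,
    brand=1,
    username="[email protected]",
    password="password",
    pin="1234",
    language="nl",
)
```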
That works for me! Could you drop a PR? | 2022-12-19T11:39:40 | 0.0 | [] | [] |
||
jertel/elastalert2 | jertel__elastalert2-509 | cf7987f3d81666c89d8a18fd285bc23ab0b8070a | diff --git a/CHANGELOG.md b/CHANGELOG.md
index f3fd6da9..c246f39a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@
- [Docs] Add exposed metrics documentation - [#498](https://github.com/jertel/elastalert2/pull/498) - @thisisxgp
- [Tests] Fix rules_test.py - [#499](https://github.com/jertel/elastalert2/pull/499) - @nsano-rururu
- Upgrade to Python 3.10 and Sphinx 4.2.0 - [#501](https://github.com/jertel/elastalert2/pull/501) - @jertel
+- max_scrolling_count now has a default value of 990 to avoid stack overflow crashes - [#509](https://github.com/jertel/elastalert2/pull/509) - @jertel
# 2.2.2
diff --git a/docs/source/elastalert.rst b/docs/source/elastalert.rst
index a7868f76..d939ecdc 100755
--- a/docs/source/elastalert.rst
+++ b/docs/source/elastalert.rst
@@ -179,8 +179,7 @@ default is 10,000, and if you expect to get near this number, consider using ``u
limit is reached, ElastAlert 2 will `scroll <https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
using the size of ``max_query_size`` through the set amount of pages, when ``max_scrolling_count`` is set or until processing all results.
-``max_scrolling_count``: The maximum amount of pages to scroll through. The default is ``0``, which means the scrolling has no limit.
-For example if this value is set to ``5`` and the ``max_query_size`` is set to ``10000`` then ``50000`` documents will be downloaded at most.
+``max_scrolling_count``: The maximum amount of pages to scroll through. The default is ``990``, to avoid a stack overflow error due to Python's stack limit of 1000. For example, if this value is set to ``5`` and the ``max_query_size`` is set to ``10000`` then ``50000`` documents will be downloaded at most.
``max_threads``: The maximum number of concurrent threads available to process scheduled rules. Large numbers of long-running rules may require this value be increased, though this could overload the Elasticsearch cluster if too many complex queries are running concurrently. Default is 10.
diff --git a/elastalert/config.py b/elastalert/config.py
index bdecfefb..381bb759 100644
--- a/elastalert/config.py
+++ b/elastalert/config.py
@@ -74,7 +74,7 @@ def load_conf(args, defaults=None, overwrites=None):
conf.setdefault('max_query_size', 10000)
conf.setdefault('scroll_keepalive', '30s')
- conf.setdefault('max_scrolling_count', 0)
+ conf.setdefault('max_scrolling_count', 990) # Avoid stack overflow in run_query, note that 1000 is Python's stack limit
conf.setdefault('disable_rules_on_error', True)
conf.setdefault('scan_subdirectories', True)
conf.setdefault('rules_loader', 'file')
| Fatal Python error: Cannot recover from stack overflow.
It happens here:
try:
if rule.get('scroll_id') and self.thread_data.num_hits < self.thread_data.total_hits and should_scrolling_continue(rule):
if not self.run_query(rule, start, end, scroll=True):
return False
except RuntimeError:
# It's possible to scroll far enough to hit max recursive depth
pass
It seems like whoever coded this was aware that this could happen, but I don't believe any kind of exception handling can deal with a stack overflow.
I don't have a reproduction unfortunately. This has happened once on an otherwise static setup. Of note is that my elastic instance can be quite slow at times.
| I'm not familiar with python, so please let me know. What kind of code should I fix?
I'll start by saying that I'm not particularly familiar with the code either.
At a high level I can see that the function run_query is recursive. While I don't know why the original author chose that, it generally seems like a bad idea. Rewriting it to be iterative instead of recursive seems like an improvement.
I'd welcome comments from anyone more familiar with elastalert.
Two fix pull requests have been merged in 2016 and 2019. It was the time of the original yelp / elastalert, and I don't know the details, but it seems that the problem often occurred.
2016
https://github.com/Yelp/elastalert/pull/652
>Fixes a bug that would sometimes cause infinite recursion:
>
>Some rule would trigger scrolling and scroll_id would get set.
Because, scroll_id, num_hits, total_hits were all shared between rules, scrolling might trigger on another rule.
The query might use get_hits_terms or get_hits_count which don't take scroll into account at all. They keep making the same query over and over.
Crash with maximum recursion depth
To fix this, scroll_id is set PER rule, and deleted after the recursion ends. We also just end the scroll when maximum recursion depth is reached.
2019
https://github.com/Yelp/elastalert/pull/2271
https://github.com/Yelp/elastalert/pull/2394
I found an unmerged pull request on the original yelp / elastalert in 2019.
refactor run_query
https://github.com/Yelp/elastalert/pull/2345
At a glance, that refactoring is about the right shape. It is of course done on an older version and would need to be merged forward and tested. Nice find.
@ferozsalam @jertel
What do you think about this issue?
Reason for the question:
I'm not using elastalert2 myself; I only check that changes work correctly when I make a pull request. So I don't know if this is a frequent issue or whether it needs to be addressed urgently.
I've not seen this error myself, but it is something that should be corrected. Python's stack limit is 1000 stack frames, so if you have enough search results for an ElastAlert query then it is technically possible to hit this bug, depending on the scroll size. There are multiple options, in order of complexity:
1. Set a default value for `max_scrolling_count` to 990 to stop the recursion before it hits the stack limit. There's a point of diminishing returns for an alerting framework to actually need to pull tens of thousands of results back anyway.
2. Refactor the run_query() function to use looping instead of recursion. This solves the stack overflow issue, by continuing to utilize the scroll methodology, but this is not recommended practice for deep paging. Again though, what is an alerting system going to do with tens of thousands of search results?
3. Replace scrolling with the Elastic recommended `search_after` params, for deep paging. This is a bigger change and will need to use the point in time indexing.
#1 would be very simple to implement and update the docs.
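For illustration, option 2 boils down to replacing the recursive call with a loop over scroll pages. A minimal sketch of the idea against the elasticsearch-py client (not the actual ElastAlert code, which carries much more per-rule state; `client` is an `Elasticsearch` instance and `process_hits` stands in for the existing per-page handling):

```python
def run_query_iterative(client, rule, query, scroll_keepalive="30s"):
    """Scroll through all pages with a loop instead of recursion."""
    page = client.search(index=rule["index"], body=query, scroll=scroll_keepalive)
    scroll_id = page["_scroll_id"]
    hits = page["hits"]["hits"]
    while hits:
        process_hits(rule, hits)  # placeholder for the existing hit handling
        page = client.scroll(scroll_id=scroll_id, scroll=scroll_keepalive)
        scroll_id = page["_scroll_id"]
        hits = page["hits"]["hits"]
    client.clear_scroll(scroll_id=scroll_id)
```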
| 2021-10-12T15:31:59 | 0.0 | [] | [] |
||
columnflow/columnflow | columnflow__columnflow-112 | acc61d6a22bcb0158bed29e3008e59ae292b0880 | diff --git a/columnflow/example_config/analysis_st.py b/columnflow/example_config/analysis_st.py
index a9592b45a..ac9c401d3 100644
--- a/columnflow/example_config/analysis_st.py
+++ b/columnflow/example_config/analysis_st.py
@@ -6,7 +6,7 @@
import re
-from scinum import Number, REL
+from scinum import Number
from order import Analysis, Shift
from columnflow.example_config.campaign_2018 import (
@@ -25,24 +25,23 @@
)
# analysis-global versions
-analysis_st.set_aux("versions", {
-})
+analysis_st.x.versions = {}
# files of bash sandboxes that might be required by remote tasks
# (used in cf.HTCondorWorkflow)
-analysis_st.set_aux("job_sandboxes", [
+analysis_st.x.job_sandboxes = [
"$CF_BASE/sandboxes/venv_columnar.sh",
-])
+]
# files of cmssw sandboxes that should be bundled for remote jobs in case they are needed
# (used in cf.HTCondorWorkflow)
-analysis_st.set_aux("cmssw_sandboxes", [
+analysis_st.x.cmssw_sandboxes = [
# "$CF_BASE/sandboxes/cmssw_default.sh",
-])
+]
# config groups for conveniently looping over certain configs
# (used in wrapper_factory)
-analysis_st.set_aux("config_groups", {})
+analysis_st.x.config_groups = {}
#
@@ -64,49 +63,51 @@
# default calibrator, selector, producer, ml model and inference model
-config_2018.set_aux("default_calibrator", "example")
-config_2018.set_aux("default_selector", "example")
-config_2018.set_aux("default_producer", "example")
-config_2018.set_aux("default_ml_model", None)
-config_2018.set_aux("default_inference_model", "example")
+config_2018.x.default_calibrator = "example"
+config_2018.x.default_selector = "example"
+config_2018.x.default_producer = "example"
+config_2018.x.default_ml_model = None
+config_2018.x.default_inference_model = "example"
+config_2018.x.default_categories = ("incl",)
+config_2018.x.default_variables = ("ht", "jet1_pt")
# process groups for conveniently looping over certain processs
# (used in wrapper_factory and during plotting)
-config_2018.set_aux("process_groups", {})
+config_2018.x.process_groups = {}
# dataset groups for conveniently looping over certain datasets
# (used in wrapper_factory and during plotting)
-config_2018.set_aux("dataset_groups", {})
+config_2018.x.dataset_groups = {}
# category groups for conveniently looping over certain categories
# (used during plotting)
-config_2018.set_aux("category_groups", {})
+config_2018.x.category_groups = {}
# variable groups for conveniently looping over certain variables
# (used during plotting)
-config_2018.set_aux("variable_groups", {})
+config_2018.x.variable_groups = {}
# shift groups for conveniently looping over certain shifts
# (used during plotting)
-config_2018.set_aux("shift_groups", {})
+config_2018.x.shift_groups = {}
# selector step groups for conveniently looping over certain steps
# (used in cutflow tasks)
-config_2018.set_aux("selector_step_groups", {
+config_2018.x.selector_step_groups = {
"example": ["Jet"],
-})
+}
# 2018 luminosity with values in inverse pb and uncertainties taken from
# https://twiki.cern.ch/twiki/bin/view/CMS/TWikiLUM?rev=171#LumiComb
-config_2018.set_aux("luminosity", Number(59740, {
- "lumi_13TeV_correlated": (REL, 0.02),
- "lumi_13TeV_2018": (REL, 0.015),
- "lumi_13TeV_1718": (REL, 0.002),
-}))
+config_2018.x.luminosity = Number(59740, {
+ "lumi_13TeV_correlated": 0.02j,
+ "lumi_13TeV_2018": 0.015j,
+ "lumi_13TeV_1718": 0.002j,
+})
# 2018 minimum bias cross section in mb (milli) for creating PU weights, values from
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/PileupJSONFileforData?rev=44#Pileup_JSON_Files_For_Run_II
-config_2018.set_aux("minbiasxs", Number(69.2, (REL, 0.046)))
+config_2018.x.minbiasxs = Number(69.2, 0.046j)
# helper to add column aliases for both shifts of a source
@@ -139,7 +140,7 @@ def add_aliases(shift_source: str, aliases: set[str], selection_dependent: bool)
)
# external files
-config_2018.set_aux("external_files", DotDict.wrap({
+config_2018.x.external_files = DotDict.wrap({
# files from TODO
"lumi": {
"golden": ("/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/Legacy_2018/Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt", "v1"), # noqa
@@ -156,10 +157,10 @@ def add_aliases(shift_source: str, aliases: set[str], selection_dependent: bool)
"minbias_xs_down": ("/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/PileUp/UltraLegacy/PileupHistogram-goldenJSON-13tev-2018-66000ub-99bins.root", "v1"), # noqa
},
},
-}))
+})
# columns to keep after certain steps
-config_2018.set_aux("keep_columns", DotDict.wrap({
+config_2018.x.keep_columns = DotDict.wrap({
"cf.ReduceEvents": {
"run", "luminosityBlock", "event",
"nJet", "Jet.pt", "Jet.eta", "Jet.btagDeepFlavB",
@@ -168,15 +169,14 @@ def add_aliases(shift_source: str, aliases: set[str], selection_dependent: bool)
"cf.MergeSelectionMasks": {
"mc_weight", "normalization_weight", "process_id", "category_ids", "cutflow.*",
},
-}))
+})
# event weight columns
-config_2018.set_aux("event_weights", ["normalization_weight", "pu_weight"])
+config_2018.x.event_weights = ["normalization_weight", "pu_weight"]
# versions per task family and optionally also dataset and shift
# None can be used as a key to define a default value
-config_2018.set_aux("versions", {
-})
+config_2018.x.versions = {}
# add categories
config_2018.add_category(
diff --git a/columnflow/tasks/framework/mixins.py b/columnflow/tasks/framework/mixins.py
index db0a147b3..884864b02 100644
--- a/columnflow/tasks/framework/mixins.py
+++ b/columnflow/tasks/framework/mixins.py
@@ -526,12 +526,14 @@ def store_parts(self) -> law.util.InsertableDict:
class CategoriesMixin(ConfigTask):
categories = law.CSVParameter(
- default=("incl",),
+ default=(),
description="comma-separated category names or patterns to select; can also be the key of "
- "a mapping defined in 'category_groups' auxiliary data of the config; default: ('incl',)",
+ "a mapping defined in 'category_groups' auxiliary data of the config; when empty, uses the "
+ "auxiliary data enty 'default_categories' when set; empty default",
brace_expand=True,
)
+ default_categories = None
allow_empty_categories = False
@classmethod
@@ -544,6 +546,15 @@ def modify_param_values(cls, params):
# resolve categories
if "categories" in params:
+ # when empty, use the config default
+ if not params["categories"] and config_inst.x("default_categories", ()):
+ params["categories"] = tuple(config_inst.x.default_categories)
+
+ # when still empty and default categories are defined, use them instead
+ if not params["categories"] and cls.default_categories:
+ params["categories"] = tuple(cls.default_categories)
+
+ # resolve them
categories = cls.find_config_objects(
params["categories"],
config_inst,
@@ -554,7 +565,7 @@ def modify_param_values(cls, params):
# complain when no categories were found
if not categories and not cls.allow_empty_categories:
- raise ValueError(f"no categories found matching '{params['categories']}'")
+ raise ValueError(f"no categories found matching {params['categories']}")
params["categories"] = tuple(categories)
@@ -591,10 +602,15 @@ def modify_param_values(cls, params):
# resolve variables
if "variables" in params:
- # when empty and default variables are defined, use them instead
+ # when empty, use the config default
+ if not params["variables"] and config_inst.x("default_variables", ()):
+ params["variables"] = tuple(config_inst.x.default_variables)
+
+ # when still empty and default variables are defined, use them instead
if not params["variables"] and cls.default_variables:
params["variables"] = tuple(cls.default_variables)
+ # resolve them
if params["variables"]:
# resolve variable names
variables = cls.find_config_objects(
@@ -609,7 +625,7 @@ def modify_param_values(cls, params):
# complain when no variables were found
if not variables and not cls.allow_empty_variables:
- raise ValueError(f"no variables found matching '{params['variables']}'")
+ raise ValueError(f"no variables found matching {params['variables']}")
params["variables"] = tuple(variables)
@@ -667,7 +683,7 @@ def modify_param_values(cls, params):
# complain when no processes were found
if not processes and not cls.allow_empty_processes:
- raise ValueError(f"no processes found matching '{params['processes']}'")
+ raise ValueError(f"no processes found matching {params['processes']}")
params["processes"] = tuple(processes)
@@ -694,7 +710,7 @@ def modify_param_values(cls, params):
# complain when no datasets were found
if not datasets and not cls.allow_empty_datasets:
- raise ValueError(f"no datasets found matching '{params['datasets']}'")
+ raise ValueError(f"no datasets found matching {params['datasets']}")
params["datasets"] = tuple(datasets)
@@ -746,7 +762,7 @@ def modify_param_values(cls, params):
# complain when no shifts were found
if not shifts and not cls.allow_empty_shift_sources:
- raise ValueError(f"no shifts found matching '{params['shift_sources']}'")
+ raise ValueError(f"no shifts found matching {params['shift_sources']}")
# convert back to sources
params["shift_sources"] = tuple(cls.reduce_shifts(shifts))
| Move residual analysis specific code
There are some left-over parts that seem too analysis specific and contain choices that maybe should not be part of columnflow. This issue is meant to document them for later action.
- [x] As mentioned in https://github.com/uhh-cms/columnflow/pull/90#discussion_r951242368, the PlotMixin contains some parameters that are somewhat too specific for the main base plot task. They should be moved to a less generic base task for (e.g.) 1D plots. However, even this choice seems somewhat too exclusive as plotting is always highly custom, so that such more specific plot base tasks should be rather analysis specific.
- [x] There's a "1mu" category in the mixins.
@mafrahm
| @mafrahm I just went through the parts that I had in mind and I think there is nothing too analysis specific left.
If you agree, we can close this one. | 2022-10-17T09:11:46 | 0.0 | [] | [] |
||
yaqwsx/KiKit | yaqwsx__KiKit-231 | cddb09d784fbd84d3736538f466597f43e6ab045 | diff --git a/kikit/eeshema.py b/kikit/eeshema.py
index becb2cc9..d22baf36 100644
--- a/kikit/eeshema.py
+++ b/kikit/eeshema.py
@@ -21,9 +21,17 @@ def getField(component, field):
return None
def readEeschemaLine(file):
- line = file.readline()
- if not line:
- raise EeschemaException("Cannot parse EEschema, line expected, got EOF")
+ line = ""
+ quotationOpen = False
+ while True:
+ c = file.read(1)
+ if c is None:
+ raise EeschemaException("Cannot parse EEschema, line expected, got EOF")
+ if c == '"':
+ quotationOpen = not quotationOpen
+ if c == '\n' and not quotationOpen:
+ break
+ line += c
return line.strip()
def readHeader(file):
@@ -44,26 +52,26 @@ def readHeader(file):
elif line.startswith(DESCR_STRING):
header["size"] = line[len(DESCR_STRING):].split()
elif line.startswith("Sheet"):
- items = line.split(maxsplit=3)
+ items = shlex.split(line)
header["sheet"] = (int(items[1]), int(items[2]))
elif line.startswith("Title"):
- header["title"] = line.split(maxsplit=2)[1]
+ header["title"] = shlex.split(line)[1]
elif line.startswith("Date"):
- header["date"] = line.split(maxsplit=2)[1]
+ header["date"] = shlex.split(line)[1]
elif line.startswith("Comp"):
- header["company"] = line.split(maxsplit=2)[1]
+ header["company"] = shlex.split(line)[1]
elif line.startswith("Rev"):
- header["revision"] = line.split(maxsplit=2)[1]
+ header["revision"] = shlex.split(line)[1]
elif line.startswith("Comment1"):
- header["comment1"] = line.split(maxsplit=2)[1]
+ header["comment1"] = shlex.split(line)[1]
elif line.startswith("Comment2"):
- header["comment2"] = line.split(maxsplit=2)[1]
+ header["comment2"] = shlex.split(line)[1]
elif line.startswith("Comment3"):
- header["comment3"] = line.split(maxsplit=2)[1]
+ header["comment3"] = shlex.split(line)[1]
elif line.startswith("Comment4"):
- header["comment4"] = line.split(maxsplit=2)[1]
+ header["comment4"] = shlex.split(line)[1]
elif line.startswith("encoding"):
- header["encoding"] = line.split(maxsplit=2)[1]
+ header["encoding"] = shlex.split(line)[1]
else:
raise EeschemaException(f"Unexpected line: '{line}'")
@@ -75,15 +83,15 @@ def readComponent(file, sheetPath=""):
return component
if line.startswith("L"):
- items = line.split()
+ items = shlex.split(line)
component["reference"] = items[2]
component["name"] = items[1]
elif line.startswith("U"):
- items = line.split()
+ items = shlex.split(line)
component["u"] = items[3]
component["unit"] = int(items[1])
elif line.startswith("P"):
- items = line.split()
+ items = shlex.split(line)
component["position"] = (int(items[1]), int(items[2]))
elif line.startswith("F"):
items = shlex.split(line)
@@ -142,7 +150,7 @@ def readSheet(file):
items = shlex.split(line)
sheet["f1"] = items[1]
elif line.startswith("U "):
- sheet["u"] = line.split()[1]
+ sheet["u"] = shlex.split(line)[1]
def extractComponents(filename, path=""):
"""
| [BUG] Reading header data fails when spaces present
https://github.com/yaqwsx/KiKit/blob/d2f3132a0a7699fd034f3fc09f4240b94cd416b5/kikit/eeshema.py#L50
For example, if the title of my schematic is "My Very First Schematic", then I get this result:
```python
from kikit.eeshema import readHeader
with open('my_schematic.sch') as input:
header = readHeader(input)
print(f"Title is: '{header["title"]}')
```
```
Title is: '"My'
```
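For clarity, the underlying problem is that the header parser split on plain whitespace, so a quoted value is cut at its first space; the patch switches to the quotation-aware `shlex.split`. A quick illustration of the difference:

```python
import shlex

line = 'Title "My Very First Schematic"'
print(line.split(maxsplit=2)[1])  # '"My'  -> what the old parser stored
print(shlex.split(line)[1])       # 'My Very First Schematic'
```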
| The parser is pretty naive - it reads the file line by line. I will look into it. Basically what we need is a quotation-aware line splitter. | 2021-10-22T07:59:38 | 0.0 | [] | [] |
||
a-r-j/graphein | a-r-j__graphein-321 | b701d18be9bb96f3fb92d5cbf3933edb08752758 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 672075c3..797a32e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,8 @@
* Fixes incorrect jaxtyping syntax for variable size dimensions [#312](https://github.com/a-r-j/graphein/pull/312)
* Fixes shape of angle embeddings for `graphein.protein.tesnor.angles.alpha/kappa`. [#315](https://github.com/a-r-j/graphein/pull/315)
* Fixes initialisation of `Protein` objects. [#317](https://github.com/a-r-j/graphein/issues/317) [#318](https://github.com/a-r-j/graphein/pull/318)
+* Fixes incorrect `rad` and `embed` argument logic in `graphein.protein.tensor.angles.dihedrals/sidechain_torsion` [#321](https://github.com/a-r-j/graphein/pull/321)
+* Fixes incorrect start padding in pNeRF output [#321](https://github.com/a-r-j/graphein/pull/321)
#### Other Changes
* Adds entry point for biopandas dataframes in `graphein.protein.tensor.io.protein_to_pyg`. [#310](https://github.com/a-r-j/graphein/pull/310)
diff --git a/graphein/protein/tensor/angles.py b/graphein/protein/tensor/angles.py
index 3fd47282..b91c9301 100644
--- a/graphein/protein/tensor/angles.py
+++ b/graphein/protein/tensor/angles.py
@@ -134,38 +134,50 @@ def sidechain_torsion(
coords[:, 3, :].unsqueeze(1),
)
- if rad:
- angles = angles * torch.pi / 180
+ if embed and not rad:
+ raise ValueError("Cannot embed torsion angles in degrees.")
+
+ if not rad:
+ angles = angles * 180 / torch.pi
+
angles, mask = to_dense_batch(angles, idxs)
angles = angles.squeeze(-1)
# Interleave sin and cos transformed tensors
- angles = rearrange(
- [torch.cos(angles), torch.sin(angles)], "t h w-> h (w t)"
- )
- mask = rearrange([mask, mask], "t h w -> h (w t)")
+ if embed:
+ angles = rearrange(
+ [torch.cos(angles), torch.sin(angles)], "t h w-> h (w t)"
+ )
+ mask = rearrange([mask, mask], "t h w -> h (w t)")
# Pad if last residues are a run of ALA, GLY or UNK
post_pad_len = 0
res_types = copy.deepcopy(res_types)
res_types.reverse()
+ PAD_RESIDUES = ["ALA", "GLY", "UNK"]
+ # If we have selenocysteine but no Se atoms,
+ # add it to the list of residues to pad since
+ if not selenium:
+ PAD_RESIDUES.append("SEC")
for res in res_types:
- PAD_RESIDUES = ["ALA", "GLY", "UNK"]
- if not selenium:
- PAD_RESIDUES.append("SEC")
-
if res in PAD_RESIDUES:
post_pad_len += 1
else:
break
if post_pad_len != 0:
- msk = torch.tensor(
- [1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], device=coords.device
- ).repeat(post_pad_len, 1)
- mask_msk = torch.tensor([False] * 8, device=coords.device).repeat(
- post_pad_len, 1
- )
+ if embed:
+ msk = torch.tensor(
+ [1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], device=coords.device
+ ).repeat(post_pad_len, 1)
+ mask_msk = torch.tensor([False] * 8, device=coords.device).repeat(
+ post_pad_len, 1
+ )
+ else:
+ msk = torch.zeros(post_pad_len, 4, device=coords.device)
+ mask_msk = torch.zeros(
+ post_pad_len, 4, device=coords.device, dtype=bool
+ )
angles = torch.vstack([angles, msk])
mask = torch.vstack([mask, mask_msk])
@@ -213,6 +225,9 @@ def kappa(
:return: Tensor of bend angles
:rtype: torch.Tensor
"""
+ if not rad and embed:
+ raise ValueError("Cannot embed kappa angles in degrees.")
+
if x.ndim == 3:
x = x[:, ca_idx, :]
@@ -281,6 +296,10 @@ def alpha(
:return: Tensor of dihedral angles
:rtype: torch.Tensor
"""
+ if not rad and embed:
+ raise ValueError(
+ "Cannot embed angles on unit circle if not in radians."
+ )
if x.ndim == 3:
x = x[:, ca_idx, :]
@@ -470,6 +489,11 @@ def dihedrals(
) -> DihedralTensor:
length = coords.shape[0]
+ if embed and not rad:
+ raise ValueError(
+ "Cannot embed angles in degrees. Use embed=True and rad=True."
+ )
+
if batch is None:
batch = torch.zeros(length, device=coords.device).long()
@@ -492,7 +516,7 @@ def dihedrals(
angles = torch.stack([phi, psi, omg], dim=2)
- if rad:
+ if not rad:
angles = angles * 180 / np.pi
if sparse:
diff --git a/graphein/protein/tensor/pnerf.py b/graphein/protein/tensor/pnerf.py
index 4263f440..1f5706ce 100644
--- a/graphein/protein/tensor/pnerf.py
+++ b/graphein/protein/tensor/pnerf.py
@@ -277,13 +277,14 @@ def extend(prev_three_coords, point, multi_m):
)
coords_trans = torch.cat([coords_pretrans[i], transformed_coords], 0)
- coords = F.pad(
- coords_trans[: total_num_angles - 1], (0, 0, 0, 0, 1, 0)
- ) # original
- # coords = F.pad(coords_trans[: total_num_angles - 2], (0, 0, 0, 0, 2, 0))
- # coords = F.pad(coords_trans[: total_num_angles - 3], (0, 0, 0, 0, 3, 0))
- # return coords
- # return coords_trans
+ # coords = F.pad(
+ # coords_trans[: total_num_angles - 1], (0, 0, 0, 0, 1, 0)
+ # ) # original
+
+ # Pad and set first Ca to origin
+ coords = F.pad(coords_trans[: total_num_angles - 2], (0, 0, 0, 0, 2, 0))
+ # Set first N to canonical position
+ coords[0, 0] = torch.tensor([[-1.4584, 0, 0]])
return coords
| FoldCompDataset calls graphein.protein.tensor.Protein with incorrect __init__ arguments
Hi y'all, love the library. It's been a huge help for me so far. I wanted to make use of the foldcomp integration but it isn't creating the Protein data objects correctly.
**Describe the bug**
This part here in `fc_to_pyg`:
https://github.com/a-r-j/graphein/blob/47d9dde5e5dc2cd085269520d2ccf34ced1d6b68/graphein/ml/datasets/foldcomp_dataset.py#L297-L305
tries to construct a `Protein` object, but `Protein`'s init method looks like
https://github.com/a-r-j/graphein/blob/47d9dde5e5dc2cd085269520d2ccf34ced1d6b68/graphein/protein/tensor/data.py#L134-L136
so trying to get proteins out of `FoldCompDataset` always errors with `TypeError: __init__() got an unexpected keyword argument 'coords'`
**To Reproduce**
Steps to reproduce the behavior:
1. Run the first three cells of foldcomp.ipynb.
**Expected behavior**
`FoldCompDataset` to iterate through `Protein` objects
**Desktop (please complete the following information):**
- OS: Ubuntu 22.04
- Python Version: 3.9
- Graphein Version [e.g. 22] & how it was installed: 47d9dde, from source
| 2023-05-22T20:37:44 | 0.0 | [] | [] |
|||
scikit-hep/cabinetry | scikit-hep__cabinetry-373 | 29065b590284e6fa159181238194cbf94bfc76fe | diff --git a/src/cabinetry/visualize/__init__.py b/src/cabinetry/visualize/__init__.py
index 034acd52..7ffe8b41 100644
--- a/src/cabinetry/visualize/__init__.py
+++ b/src/cabinetry/visualize/__init__.py
@@ -509,6 +509,8 @@ def ranking(
) -> mpl.figure.Figure:
"""Produces a ranking plot showing the impact of parameters on the POI.
+ The parameters are shown in decreasing order of greatest post-fit impact.
+
Args:
ranking_results (fit.RankingResults): fit results, and pre- and post-fit impacts
figure_folder (Union[str, pathlib.Path], optional): path to the folder to save
@@ -524,13 +526,14 @@ def ranking(
# path is None if figure should not be saved
figure_path = pathlib.Path(figure_folder) / "ranking.pdf" if save_figure else None
- # sort parameters by decreasing average post-fit impact
- avg_postfit_impact = (
- np.abs(ranking_results.postfit_up) + np.abs(ranking_results.postfit_down)
- ) / 2
+ # sort parameters by decreasing maximum post-fit impact
+ max_postfit_impact = np.maximum(
+ np.abs(ranking_results.postfit_up),
+ np.abs(ranking_results.postfit_down),
+ )
# get indices to sort by decreasing impact
- sorted_indices = np.argsort(avg_postfit_impact)[::-1]
+ sorted_indices = np.argsort(max_postfit_impact)[::-1]
bestfit = ranking_results.bestfit[sorted_indices]
uncertainty = ranking_results.uncertainty[sorted_indices]
labels = np.asarray(ranking_results.labels)[sorted_indices] # labels are list
| Ranking plot parameter order: max impact rather than average impact?
The ordering of parameters in `cabinetry.visualize.ranking` [sorts parameters by the _average_ post-fit up- and down- impact](https://github.com/scikit-hep/cabinetry/blob/master/src/cabinetry/visualize/__init__.py#L529).
IMO a more natural/useful ordering is _greatest_ post-fit impact in whichever direction
(i.e. `np.maximum(np.abs(postfit_up), np.abs(postfit_down))`),
since unlike the average, this corresponds to a potential fit behavior (pulling the parameter significantly in a given direction).
Ofc. for many cases there should be basically no difference.
But in the cases where it does make a difference (parameters with very asymmetric/one-sided impact)
these "weird" parameters are sorted lower by the average impact & could even be hidden when also using `max_parameters`.
OTOH I noticed that `cabinetry.visualize.plot_result.ranking` [already uses the max](https://github.com/scikit-hep/cabinetry/blob/628150152c8bc309a2ffb88b2f3ac95582bcbb74/src/cabinetry/visualize/plot_result.py#L225) for axis limits, so is there maybe a reason to use the avg. when sorting?
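A small numeric illustration of where the two orderings disagree, with made-up impact values (not from any real fit):

```python
import numpy as np

# post-fit impacts for two parameters: index 0 is symmetric, index 1 is one-sided
postfit_up = np.asarray([0.5, 0.9])
postfit_down = np.asarray([-0.5, 0.0])

avg_impact = (np.abs(postfit_up) + np.abs(postfit_down)) / 2       # [0.5, 0.45]
max_impact = np.maximum(np.abs(postfit_up), np.abs(postfit_down))  # [0.5, 0.9]

print(np.argsort(avg_impact)[::-1])  # [0 1]: the symmetric parameter ranks first
print(np.argsort(max_impact)[::-1])  # [1 0]: the one-sided parameter ranks first
```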
| Hi @lhenkelm, I agree that an ordering by the maximum impact in either direction makes more sense here for the reasons you mention. The max is used for the visualization range just to ensure nothing exceeds the axis range.
Feel free to submit a PR changing the ordering if you'd like, otherwise I can also take care of it. | 2022-10-18T16:56:14 | 0.0 | [] | [] |
||
SneaksAndData/adapta | SneaksAndData__adapta-316 | a16999fb6e6d44ee3d1b79e835babd23d27eb7bf | diff --git a/adapta/storage/distributed_object_store/datastax_astra/astra_client.py b/adapta/storage/distributed_object_store/datastax_astra/astra_client.py
index 106a1a23..705c9997 100644
--- a/adapta/storage/distributed_object_store/datastax_astra/astra_client.py
+++ b/adapta/storage/distributed_object_store/datastax_astra/astra_client.py
@@ -26,6 +26,7 @@
import tempfile
import typing
import uuid
+from concurrent.futures import ThreadPoolExecutor
from dataclasses import fields, is_dataclass
from typing import Optional, Dict, TypeVar, Callable, Type, List, Any, get_origin
@@ -188,6 +189,7 @@ def filter_entities(
primary_keys: Optional[List[str]] = None,
partition_keys: Optional[List[str]] = None,
deduplicate=False,
+ num_threads: Optional[int] = None,
) -> pandas.DataFrame:
"""
Run a filter query on the entity of type TModel backed by table `table_name`.
@@ -208,7 +210,8 @@ class Test:
:param: select_columns: An optional list of columns to return with the query.
:param: primary_keys: An optional list of columns that constitute a primary key, if it cannot be inferred from is_primary_key metadata on a dataclass field.
:param: partition_keys: An optional list of columns that constitute a partition key, if it cannot be inferred from is_partition_key metadata on a dataclass field.
- param: deduplicate: Optionally deduplicate query result, for example when only the partition key part of a primary key is used to fetch results.
+ :param: deduplicate: Optionally deduplicate query result, for example when only the partition key part of a primary key is used to fetch results.
+ :param: num_threads: Optionally run filtering using multiple threads.
"""
def apply(model: Type[Model], key_column_filter: Dict[str, Any], columns_to_select: Optional[List[str]]):
@@ -224,6 +227,11 @@ def normalize_column_name(column_name: str) -> str:
return column_name.replace(filter_suffix[0], "")
+ def to_pandas(
+ model: Type[Model], key_column_filter: Dict[str, Any], columns_to_select: Optional[List[str]]
+ ) -> pandas.DataFrame:
+ return pandas.DataFrame([dict(v.items()) for v in list(apply(model, key_column_filter, columns_to_select))])
+
assert (
self._session is not None
), "Please instantiate an AstraClient using with AstraClient(...) before calling this method"
@@ -238,12 +246,27 @@ def normalize_column_name(column_name: str) -> str:
select_columns=select_columns,
)
- result = pandas.concat(
- [
- pandas.DataFrame([dict(v.items()) for v in list(apply(model_class, key_column_filter, select_columns))])
- for key_column_filter in key_column_filter_values
- ]
- )
+ if num_threads:
+ with ThreadPoolExecutor(max_workers=num_threads) as tpe:
+ result = pandas.concat(
+ tpe.map(
+ lambda args: to_pandas(*args),
+ [
+ (model_class, key_column_filter, select_columns)
+ for key_column_filter in key_column_filter_values
+ ],
+ chunksize=max(int(len(key_column_filter_values) / num_threads), 1),
+ )
+ )
+ else:
+ result = pandas.concat(
+ [
+ pandas.DataFrame(
+ [dict(v.items()) for v in list(apply(model_class, key_column_filter, select_columns))]
+ )
+ for key_column_filter in key_column_filter_values
+ ]
+ )
if select_columns:
filter_columns = {
| parallelize astra filter entities for big key_column_filter_values
Case in point:
Routes contains about 1000 source-sink pairs.
```python
filters = routes.to_dict(orient="records")
result = astra_client.filter_entities(
model_class=OmniChannelShippingDataModel,
table_name=socket.data_path,
key_column_filter_values=filters,
select_columns=[
SHIPPING.merge_key,
SHIPPING.source_location_key,
SHIPPING.sink_location_key,
SHIPPING.parcel_broker_name,
SHIPPING.shipping_courier_name,
SHIPPING.shipping_service_type,
SHIPPING.shipping_lead_time_max,
SHIPPING.parcel_cost,
SHIPPING.unit_transportation_cost,
SHIPPING.capacity_volume,
SHIPPING.capacity_weight,
SHIPPING.capacity_sku,
SHIPPING.volume_utilization,
],
)
```
In this case, we are fetching each pair (source and sink) one by one
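With the change in this PR, the same call can fan those per-pair reads out over a thread pool via the new `num_threads` argument. A hedged sketch of the intended usage, reusing the names from the snippet above (`selected_columns` stands for the same column list):

```python
result = astra_client.filter_entities(
    model_class=OmniChannelShippingDataModel,
    table_name=socket.data_path,
    key_column_filter_values=filters,
    select_columns=selected_columns,  # same SHIPPING.* columns as above
    num_threads=8,                    # e.g. 8 worker threads; tune to the cluster
)
```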
| 2023-08-30T12:09:20 | 0.0 | [] | [] |
|||
snuailab/waffle_utils | snuailab__waffle_utils-38 | 9eb580ccf4ba4a1034eef038238a9b21002778f8 | diff --git a/waffle_utils/run.py b/waffle_utils/run.py
index b426f8f..ace592d 100644
--- a/waffle_utils/run.py
+++ b/waffle_utils/run.py
@@ -9,6 +9,7 @@
DEFAULT_IMAGE_EXTENSION,
SUPPORTED_IMAGE_EXTENSION,
)
+from waffle_utils.video import SUPPORTED_VIDEO_EXTENSION
from waffle_utils.video.tools import (
DEFAULT_FRAME_RATE,
create_video,
@@ -46,11 +47,11 @@ def _get_file_from_url(
@app.command(name="unzip")
def _unzip(
- url: str = typer.Option(..., help="download link"),
+ file_path: str = typer.Option(..., help="zip file link"),
output_dir: str = typer.Option(..., help="output directory"),
create_directory: bool = True,
):
- unzip(url, output_dir, create_directory=create_directory)
+ unzip(file_path, output_dir, create_directory=create_directory)
@app.command(name="from_coco")
| Revert "Feature/video frame extractor"
Reverts snuailab/waffle_utils#19
| 2023-03-14T06:09:00 | 0.0 | [] | [] |
|||
EBI-Metagenomics/emg-toolkit | EBI-Metagenomics__emg-toolkit-14 | dacb1ec48c6bd9cfab92daa07df86389a15312fb | diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 7fdcf17..266f4dc 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,8 +1,9 @@
[bumpversion]
-current_version = 0.8.0
+current_version = 0.9.0
commit = True
tag = True
[bumpversion:file:setup.py]
[bumpversion:file:mg_toolkit/__init__.py]
+
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 40ced60..ea1bd95 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,9 +3,9 @@ repos:
rev: v3.2.0
hooks:
- id: trailing-whitespace
- exclude: '^tests/*'
+ exclude: '^tests/*|.bumpversion.cfg'
- id: end-of-file-fixer
- exclude: '^tests/*'
+ exclude: '^tests/*|.bumpversion.cfg'
- id: check-yaml
- id: check-added-large-files
- id: check-merge-conflict
diff --git a/mg_toolkit/__init__.py b/mg_toolkit/__init__.py
index e28dd8e..c77bde0 100644
--- a/mg_toolkit/__init__.py
+++ b/mg_toolkit/__init__.py
@@ -20,4 +20,4 @@
__all__ = ["original_metadata", "sequence_search", "bulk_download"]
-__version__ = "0.8.0"
+__version__ = "0.9.0"
diff --git a/mg_toolkit/bulk_download.py b/mg_toolkit/bulk_download.py
index 164a40b..1bde8fe 100644
--- a/mg_toolkit/bulk_download.py
+++ b/mg_toolkit/bulk_download.py
@@ -226,7 +226,7 @@ def run(self):
while total_results_processed < num_results:
- num_results_processed += self.process_page(response_data, progress_bar)
+ num_results_processed = self.process_page(response_data, progress_bar)
total_results_processed += num_results_processed
# navigate to the next link
diff --git a/setup.py b/setup.py
index 199230d..2ca86f6 100644
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(exclude=["ez_setup"]),
- version="0.8.0",
+ version="0.9.0",
python_requires=">=3.5",
install_requires=install_requirements,
setup_requires=["pytest-runner"],
| Fix bug with bulk_downloader.
The bulk downloader wasn't using the pagination when getting the files.
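The one-character change above matters because the per-page counter was accumulated with `+=` inside the loop, so the running total grew much faster than the number of results actually processed and the loop stopped after only a couple of pages. A stripped-down illustration (made-up page size and result count):

```python
num_results, page_size = 500, 100  # 5 pages needed

# buggy version: the per-iteration counter is never reset
processed = total = pages = 0
while total < num_results:
    processed += page_size  # 100, 200, 300, ...
    total += processed      # 100, 300, 600 -> exits too early
    pages += 1
print(pages)  # 3

# fixed version: assign the page's count each iteration
total = pages = 0
while total < num_results:
    processed = page_size
    total += processed      # 100, 200, ..., 500
    pages += 1
print(pages)  # 5
```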
| 2020-11-20T21:30:29 | 0.0 | [] | [] |
|||
stac-utils/stac-api-validator | stac-utils__stac-api-validator-217 | 53b561d3ab2f372c5371146ed3309d71ca0f4760 | diff --git a/src/stac_api_validator/__main__.py b/src/stac_api_validator/__main__.py
index 0cbd426..3be2be4 100644
--- a/src/stac_api_validator/__main__.py
+++ b/src/stac_api_validator/__main__.py
@@ -66,7 +66,6 @@ def main(
auth_query_parameter: Optional[str] = None,
) -> int:
"""STAC API Validator."""
-
logging.basicConfig(stream=sys.stdout, level=log_level)
try:
@@ -97,9 +96,9 @@ def main(
click.secho("Errors: none", fg="green")
if errors:
- return 1
+ sys.exit(1)
else:
- return 0
+ sys.exit(0)
if __name__ == "__main__":
| Non-zero exit code for validation errors
I'd like to use **stac-api-validator** in a CI/CD pipeline: https://github.com/stac-utils/stac-fastapi/pull/508. Currently, **stac-api-validator** is returning a `0` exit code even if there's a jsonschema validation error (out-of-the-box stac-fastapi-pgstac produces invalid STAC ATM):
<img width="1251" alt="Screen Shot 2022-12-30 at 7 17 26 AM" src="https://user-images.githubusercontent.com/58314/210079912-db99f6b1-7af5-4bfe-b68d-26b9120ade4c.png">
It would be nice to return a non-zero code that would cause my Github workflow to fail.
| 2023-01-10T17:50:38 | 0.0 | [] | [] |
|||
a-r-j/graphein | a-r-j__graphein-208 | c07fc780ab8b923100a4e943bee991b00e77d3a6 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80ccadf3..57f7d4b5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,20 @@
+### 1.5.2
+
+### GRN
+* [Bugfix] - [#208](https://github.com/a-r-j/graphein/pull/208) - Resolves SSL issues with RegNetwork.
+
+
+#### ML
+* [Feature] - [#208](https://github.com/a-r-j/graphein/pull/208) support for loading local pdb files by ``ProteinGraphDataset`` and ``InMemoryProteinGraphDataset``.
+> Adds a `pdb_paths` parameter and sets `self.raw_dir` to the root path (`self.pdb_path`) of the `pdb_paths` list (there should be only one root path, i.e. all pdb files should be under the same folder).
+>
+> The dataset then loads pdb files from `self.pdb_path` instead of from `self.raw`.
+> If you want to download from AF2 or the PDB instead, just set `pdb_paths` to `None` and it goes back to the former behaviour.
+
+#### CI
+* [Bugfix] - [#208](https://github.com/a-r-j/graphein/pull/208) explicitly installs `jupyter_contrib_nbextensions` in Docker.
+
+
### 1.5.1
#### Protein
diff --git a/Dockerfile b/Dockerfile
index c86d49b1..ec0d6904 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -42,9 +42,7 @@ RUN conda install -c fvcore -c iopath -c conda-forge fvcore iopath
RUN conda install -c pytorch3d pytorch3d
RUN conda install -c dglteam dgl
RUN conda install -c salilab dssp
-
RUN conda install -c conda-forge ipywidgets
-RUN jupyter nbextension enable --py widgetsnbextension
RUN export CUDA=$(python -c "import torch; print('cu'+torch.version.cuda.replace('.',''))") \
&& export TORCH=$(python -c "import torch; print(torch.__version__)") \
@@ -54,6 +52,8 @@ RUN export CUDA=$(python -c "import torch; print('cu'+torch.version.cuda.replace
&& pip install torch-spline-conv -f https://pytorch-geometric.com/whl/torch-${TORCH}+${CUDA}.html --no-cache-dir \
&& pip install torch-geometric --no-cache-dir
+RUN pip install jupyter_contrib_nbextensions
+RUN jupyter nbextension enable --py widgetsnbextension
# Testing
# docker-compose -f docker-compose.cpu.yml up -d --build
diff --git a/graphein/grn/parse_regnetwork.py b/graphein/grn/parse_regnetwork.py
index 8b0a2282..8677866b 100644
--- a/graphein/grn/parse_regnetwork.py
+++ b/graphein/grn/parse_regnetwork.py
@@ -14,6 +14,7 @@
import pandas as pd
import wget
+import ssl
from graphein.utils.utils import filter_dataframe, ping
@@ -41,10 +42,10 @@ def _download_RegNetwork(
"RegNetwork is not available. Please check your internet connection or verify at: http://www.regnetworkweb.org"
)
- mouse_url = "http://regnetworkweb.org/download/mouse.zip"
+ mouse_url = "https://regnetworkweb.org/download/mouse.zip"
if network_type == "human":
- human_url = "http://www.regnetworkweb.org/download/human.zip"
+ human_url = "https://regnetworkweb.org/download/human.zip"
url = human_url
elif network_type == "mouse":
url = mouse_url
@@ -66,8 +67,12 @@ def _download_RegNetwork(
# Download data and unzip
if not os.path.exists(file):
log.info("Downloading RegNetwork ...")
+ # switch ssl context for unverified download
+ default_https_context = ssl._create_default_https_context
+ ssl._create_default_https_context = ssl._create_unverified_context
wget.download(url, compressed_file)
-
+ # switch ssl context back to default
+ ssl._create_default_https_context = default_https_context
with zipfile.ZipFile(compressed_file, "r") as zip_ref:
zip_ref.extractall(out_dir)
@@ -80,7 +85,7 @@ def _download_RegNetwork_regtypes(root_dir: Optional[Path] = None) -> str:
:param root_dir: Path object specifying the location to download RegNetwork to
"""
- url = "http://www.regnetworkweb.org/download/RegulatoryDirections.zip"
+ url = "https://regnetworkweb.org/download/RegulatoryDirections.zip"
if root_dir is None:
root_dir = Path(__file__).parent.parent.parent / "datasets"
@@ -94,7 +99,12 @@ def _download_RegNetwork_regtypes(root_dir: Optional[Path] = None) -> str:
# Download data and unzip
if not os.path.exists(file):
log.info("Downloading RegNetwork reg types ...")
+ # switch ssl context for unverified download
+ default_https_context = ssl._create_default_https_context
+ ssl._create_default_https_context = ssl._create_unverified_context
wget.download(url, compressed_file)
+ # switch ssl context back to default
+ ssl._create_default_https_context = default_https_context
with zipfile.ZipFile(compressed_file, "r") as zip_ref:
zip_ref.extractall(out_dir)
diff --git a/graphein/ml/datasets/torch_geometric_dataset.py b/graphein/ml/datasets/torch_geometric_dataset.py
index 3665918f..5de6f9eb 100644
--- a/graphein/ml/datasets/torch_geometric_dataset.py
+++ b/graphein/ml/datasets/torch_geometric_dataset.py
@@ -41,6 +41,7 @@ def __init__(
self,
root: str,
name: str,
+ pdb_paths: Optional[List[str]] = None,
pdb_codes: Optional[List[str]] = None,
uniprot_ids: Optional[List[str]] = None,
graph_label_map: Optional[Dict[str, torch.Tensor]] = None,
@@ -72,6 +73,8 @@ def __init__(
:type root: str
:param name: Name of the dataset. Will be saved to ``data_$name.pt``.
:type name: str
+ :param pdb_paths: List of full path of pdb files to load. Defaults to ``None``.
+ :type pdb_paths: Optional[List[str]], optional
:param pdb_codes: List of PDB codes to download and parse from the PDB.
Defaults to None.
:type pdb_codes: Optional[List[str]], optional
@@ -135,6 +138,23 @@ def __init__(
else None
)
+ self.pdb_paths = pdb_paths
+ if self.pdb_paths is None:
+ if self.pdb_codes and self.uniprot_ids:
+ self.structures = self.pdb_codes + self.uniprot_ids
+ elif self.pdb_codes:
+ self.structures = pdb_codes
+ elif self.uniprot_ids:
+ self.structures = uniprot_ids
+ # Use local saved pdb_files instead of download or move them to self.root/raw dir
+ else:
+ if isinstance(self.pdb_paths, list):
+ self.structures = [
+ os.path.splitext(os.path.split(pdb_path)[-1])[0]
+ for pdb_path in self.pdb_paths
+ ]
+ self.pdb_path, _ = os.path.split(self.pdb_paths[0])
+
if self.pdb_codes and self.uniprot_ids:
self.structures = self.pdb_codes + self.uniprot_ids
elif self.pdb_codes:
@@ -157,6 +177,7 @@ def __init__(
self.graph_transformation_funcs = graph_transformation_funcs
self.pdb_transform = pdb_transform
self.num_cores = num_cores
+ self.af_version = af_version
super().__init__(
root,
transform=transform,
@@ -176,6 +197,13 @@ def processed_file_names(self) -> List[str]:
"""Name of the processed file."""
return [f"data_{self.name}.pt"]
+ @property
+ def raw_dir(self) -> str:
+ if self.pdb_paths is not None:
+ return self.pdb_path # replace raw dir with user local pdb_path
+ else:
+ return os.path.join(self.root, "raw")
+
def download(self):
"""Download the PDB files from RCSB or Alphafold."""
self.config.pdb_dir = Path(self.raw_dir)
@@ -298,7 +326,8 @@ def process(self):
class ProteinGraphDataset(Dataset):
def __init__(
self,
- root,
+ root: str,
+ pdb_paths: Optional[List[str]] = None,
pdb_codes: Optional[List[str]] = None,
uniprot_ids: Optional[List[str]] = None,
# graph_label_map: Optional[Dict[str, int]] = None,
@@ -327,6 +356,8 @@ def __init__(
:param root: Root directory where the dataset should be saved.
:type root: str
+ :param pdb_paths: List of full path of pdb files to load. Defaults to ``None``.
+ :type pdb_paths: Optional[List[str]], optional
:param pdb_codes: List of PDB codes to download and parse from the PDB.
Defaults to ``None``.
:type pdb_codes: Optional[List[str]], optional
@@ -388,14 +419,22 @@ def __init__(
if uniprot_ids is not None
else None
)
-
- if self.pdb_codes and self.uniprot_ids:
- self.structures = self.pdb_codes + self.uniprot_ids
- elif self.pdb_codes:
- self.structures = pdb_codes
- elif self.uniprot_ids:
- self.structures = uniprot_ids
- self.af_version = af_version
+ self.pdb_paths = pdb_paths
+ if self.pdb_paths is None:
+ if self.pdb_codes and self.uniprot_ids:
+ self.structures = self.pdb_codes + self.uniprot_ids
+ elif self.pdb_codes:
+ self.structures = pdb_codes
+ elif self.uniprot_ids:
+ self.structures = uniprot_ids
+ # Use local saved pdb_files instead of download or move them to self.root/raw dir
+ else:
+ if isinstance(self.pdb_paths, list):
+ self.structures = [
+ os.path.splitext(os.path.split(pdb_path)[-1])[0]
+ for pdb_path in self.pdb_paths
+ ]
+ self.pdb_path, _ = os.path.split(self.pdb_paths[0])
# Labels & Chains
@@ -424,6 +463,7 @@ def __init__(
self.num_cores = num_cores
self.pdb_transform = pdb_transform
self.graph_transformation_funcs = graph_transformation_funcs
+ self.af_version = af_version
super().__init__(
root,
transform=transform,
@@ -450,6 +490,13 @@ def processed_file_names(self) -> List[str]:
else:
return [f"{pdb}.pt" for pdb in self.structures]
+ @property
+ def raw_dir(self) -> str:
+ if self.pdb_paths is not None:
+ return self.pdb_path # replace raw dir with user local pdb_path
+ else:
+ return os.path.join(self.root, "raw")
+
def validate_input(self):
if self.graph_label_map is not None:
assert len(self.structures) == len(
@@ -554,6 +601,7 @@ def divide_chunks(l: List[str], n: int = 2) -> Generator:
# Create graph objects
file_names = [f"{self.raw_dir}/{pdb}.pdb" for pdb in pdbs]
+
graphs = construct_graphs_mp(
pdb_path_it=file_names,
config=self.config,
diff --git a/notebooks/dataloader_tutorial.ipynb b/notebooks/dataloader_tutorial.ipynb
index 57d68cd4..3b5dcfd8 100644
--- a/notebooks/dataloader_tutorial.ipynb
+++ b/notebooks/dataloader_tutorial.ipynb
@@ -54,6 +54,8 @@
" # Root directory where the dataset should be saved.\n",
" name: str, \n",
" # Name of the dataset. Will be saved to ``data_$name.pt``.\n",
+ " pdb_paths:Optional[List[str]] =None,\n",
+ " # List of full path of pdb files to load.\n",
" pdb_codes: Optional[List[str]] = None, \n",
" # List of PDB codes to download and parse from the PDB.\n",
" uniprot_ids: Optional[List[str]] = None, \n",
@@ -90,7 +92,7 @@
"#### Directory Structure\n",
"Creating a ``ProteinGraphDataset`` will create two directories under ``root``:\n",
"\n",
- "* ``root/raw`` - Contains raw PDB files\n",
+ "* ``root/raw`` - Contains raw PDB files which are downloaded\n",
"* ``root/processed`` - Contains processed graphs (in ``pytorch_geometric.data.Data`` format) saved as ``$PDB.pt / $UNIPROT_ID.pt``"
]
},
@@ -156,6 +158,75 @@
" break"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Load from local path\n",
+ "\n",
+ "\n",
+ "Creating a ``ProteinGraphDataset`` from a list of full path of pdb files:\n",
+ "\n",
+ "* ``root/raw`` - Will be empty since no pdb files are downloaded\n",
+ "* ``root/processed`` - Contains processed graphs (in ``pytorch_geometric.data.Data`` format) saved as ``$PDB.pt / $UNIPROT_ID.pt``"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['../tests/protein/test_data/1lds.pdb', '../tests/protein/test_data/4hhb.pdb', '../tests/protein/test_data/alphafold_structure.pdb']\n"
+ ]
+ }
+ ],
+ "source": [
+ "# import sys\n",
+ "# sys.path.append('../') # add system path for python\n",
+ "\n",
+ "import os \n",
+ "from graphein.protein.config import ProteinGraphConfig\n",
+ "from graphein.ml import ProteinGraphDataset, ProteinGraphListDataset\n",
+ "import torch \n",
+ "\n",
+ "local_dir = \"../tests/protein/test_data/\"\n",
+ "pdb_paths = [os.path.join(local_dir, pdb_path) for pdb_path in os.listdir(local_dir) if pdb_path.endswith(\".pdb\")]\n",
+ "print(pdb_paths)\n",
+ "\n",
+ "# let's load local dataset from local_dir!\n",
+ "ds = ProteinGraphDataset(\n",
+ " root = \"../graphein/ml/datasets/test\",\n",
+ " pdb_paths = pdb_paths,\n",
+ " graphein_config=ProteinGraphConfig(),\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "DataBatch(edge_index=[2, 666], node_id=[2], coords=[2], name=[2], dist_mat=[2], num_nodes=671, batch=[671], ptr=[3])\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Create a dataloader from dataset and inspect a batch\n",
+ "from torch_geometric.loader import DataLoader\n",
+ "dl = DataLoader(ds, batch_size=2, shuffle=True, drop_last=True)\n",
+ "for i in dl:\n",
+ " print(i)\n",
+ " break"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -171,6 +242,8 @@
" # Root directory where the dataset should be saved.\n",
" name: str, \n",
" # Name of the dataset. Will be saved to ``data_$name.pt``.\n",
+ " pdb_paths:Optional[List[str]] =None,\n",
+ " # List of full path of pdb files to load.\n",
" pdb_codes: Optional[List[str]] = None, \n",
" # List of PDB codes to download and parse from the PDB.\n",
" uniprot_ids: Optional[List[str]] = None, \n",
@@ -292,6 +365,124 @@
" break"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Load from local path\n",
+ "\n",
+ "\n",
+ "Creating an ``InMemoryProteinGraphDataset`` from a list of full path of pdb files:\n",
+ "\n",
+ "* ``root/raw`` - Will be empty since no pdb files are downloaded\n",
+ "* ``root/processed`` - Contains processed datasets saved as ``data_{name}.pt``\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['../tests/protein/test_data/1lds.pdb', '../tests/protein/test_data/4hhb.pdb', '../tests/protein/test_data/alphafold_structure.pdb']\n",
+ "Constructing Graphs...\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Processing...\n"
+ ]
+ },
+ {
+ "data": {
+ "application/json": {
+ "ascii": false,
+ "bar_format": null,
+ "colour": null,
+ "elapsed": 0.2526402473449707,
+ "initial": 0,
+ "n": 0,
+ "ncols": null,
+ "nrows": null,
+ "postfix": null,
+ "prefix": "",
+ "rate": null,
+ "total": 3,
+ "unit": "it",
+ "unit_divisor": 1000,
+ "unit_scale": false
+ },
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "d5ed353098664f6f803fa502264df986",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0/3 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Converting Graphs...\n",
+ "Saving Data...\n",
+ "Done!\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Done!\n"
+ ]
+ }
+ ],
+ "source": [
+ "from graphein.ml.datasets.torch_geometric_dataset import InMemoryProteinGraphDataset\n",
+ "\n",
+ "\n",
+ "local_dir = \"../tests/protein/test_data/\"\n",
+ "pdb_paths = [os.path.join(local_dir, pdb_path) for pdb_path in os.listdir(local_dir) if pdb_path.endswith(\".pdb\")]\n",
+ "print(pdb_paths)\n",
+ "\n",
+ "# let's load local dataset from local_dir!\n",
+ "ds = InMemoryProteinGraphDataset(\n",
+ " root = \"../graphein/ml/datasets/test\",\n",
+ " name = \"test\",\n",
+ " pdb_paths = pdb_paths,\n",
+ ")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "DataBatch(edge_index=[2, 951], node_id=[2], coords=[2], name=[2], dist_mat=[2], num_nodes=956, batch=[956], ptr=[3])\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Create a dataloader from dataset and inspect a batch\n",
+ "dl = DataLoader(ds, batch_size=2, shuffle=True, drop_last=True)\n",
+ "for i in dl:\n",
+ " print(i)\n",
+ " break"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -649,11 +840,8 @@
}
],
"metadata": {
- "interpreter": {
- "hash": "2084dd4fc0c9f9186ef9bb5d9f5c6652432726a285d6ac2dcf2b1a616ab39cbb"
- },
"kernelspec": {
- "display_name": "Python 3.8.12 ('graphein-wip')",
+ "display_name": "Python 3.7.13 ('base')",
"language": "python",
"name": "python3"
},
@@ -667,9 +855,14 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.8.12"
+ "version": "3.7.11"
},
- "orig_nbformat": 4
+ "orig_nbformat": 4,
+ "vscode": {
+ "interpreter": {
+ "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"
+ }
+ }
},
"nbformat": 4,
"nbformat_minor": 2
| Dev
merge dev features into master
| 2022-09-14T07:45:02 | 0.0 | [] | [] |
|||
r0x0r/pywebview | r0x0r__pywebview-1191 | d605fa60bf9347816a182fea4bc4b295d889b142 | diff --git a/webview/platforms/gtk.py b/webview/platforms/gtk.py
index ccbb1d42..7534d985 100755
--- a/webview/platforms/gtk.py
+++ b/webview/platforms/gtk.py
@@ -401,7 +401,7 @@ def create_file_dialog(self, dialog_type, directory, allow_multiple, save_filena
if response == gtk.ResponseType.OK:
if dialog_type == SAVE_DIALOG:
- file_name = dialog.get_filename()
+ file_name = (dialog.get_filename(),)
else:
file_name = dialog.get_filenames()
else:
| Save file dialog returns path as char tuple instead of string
### Specification
- pywebview version: 4.2.2
- python: 3.11.2
- operating system: Linux Debian 12.0
- web renderer: GTK _(Qt is not working and can only display a blank window; note that I'm using KDE)_
### Description
When calling `webview.create_file_dialog` in `SAVE_FILE` mode, it returns the selected path exploded into a tuple of char.
```python
import webview
def bug (window):
result = window.create_file_dialog(webview.SAVE_DIALOG, save_filename='bug.tmp')
print(result)
if __name__ == '__main__':
window = webview.create_window('Save file dialog bug', html='<h1>Test</h1>')
webview.start(bug, window)
```
Expected result: either a string like `/some/where/bug.tmp` or a tuple like `("/some/where/bug.tmp",)` should be printed to stdout.
Actual result: a tuple like `('/', 's', 'o', 'm', 'e', '/', 'w', 'h', 'e', 'r', 'e', '/', 'b', 'u', 'g', '.', 't', 'm', 'p')` is printed to stdout.
### Practicalities
Given what's happening on [lines 613 and 614](https://github.com/r0x0r/pywebview/blob/98dcb8180d5892f0dafd4d07ec3e634616074f96/webview/platforms/qt.py#L613-L614) in the Qt glue:
```python
# Line 613-614 of webview/platforms/qt.py:
elif dialog_type == SAVE_DIALOG or not allow_multiple:
file_names = (self._file_name[0],)
```
My guess would be that [lines 421 and 422](https://github.com/r0x0r/pywebview/blob/98dcb8180d5892f0dafd4d07ec3e634616074f96/webview/platforms/gtk.py#L421-L422 ) of the GTK glue should be changed from this:
```python
if dialog_type == SAVE_DIALOG:
file_name = dialog.get_filename()
```
to this:
```python
if dialog_type == SAVE_DIALOG:
file_name = (dialog.get_filename(),)
```
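For completeness, a minimal illustration (plain Python, independent of pywebview) of why a bare string turns into the per-character tuple as soon as something downstream builds a tuple from it:
```python
file_name = "/some/where/bug.tmp"

print(tuple(file_name))  # ('/', 's', 'o', 'm', 'e', ...)  each character becomes an element
print((file_name,))      # ('/some/where/bug.tmp',)        the intended one-element tuple
```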
| 2023-08-08T18:39:22 | 0.0 | [] | [] |
|||
pollen-robotics/reachy2-sdk | pollen-robotics__reachy2-sdk-242 | 44b911b575f0113c0ed658a444df7b53417af90f | diff --git a/src/reachy2_sdk/reachy_sdk.py b/src/reachy2_sdk/reachy_sdk.py
index 50444a6d..af5f3662 100644
--- a/src/reachy2_sdk/reachy_sdk.py
+++ b/src/reachy2_sdk/reachy_sdk.py
@@ -167,6 +167,7 @@ def disconnect(self) -> None:
"get_move_joints_request",
"is_move_finished",
"is_move_playing",
+ "mobile_base",
]:
delattr(self, attr)
| Disconnection issue: AttributeError: can't delete attribute 'mobile_base'
| 2024-03-25T13:41:42 | 0.0 | [] | [] |
|||
openatx/uiautomator2 | openatx__uiautomator2-978 | 82ef1f958b6d0d5a88150db282c12224b2e603d1 | diff --git a/uiautomator2/__init__.py b/uiautomator2/__init__.py
index dd64d81..dcea9a4 100644
--- a/uiautomator2/__init__.py
+++ b/uiautomator2/__init__.py
@@ -486,6 +486,23 @@ def press(self, key: Union[int, str], meta=None):
key, meta) if meta else self.jsonrpc.pressKeyCode(key)
else:
return self.jsonrpc.pressKey(key)
+
+ def long_press(self, key: Union[int, str]):
+ """
+ long press key via name or key code
+
+ Args:
+ key: key name or key code
+
+ Examples:
+ long_press("home") same as "adb shell input keyevent --longpress KEYCODE_HOME"
+ """
+ with self._operation_delay("press"):
+ if isinstance(key, int):
+ self.shell("input keyevent --longpress %d" % key)
+ else:
+ key = key.upper()
+ self.shell(f"input keyevent --longpress {key}")
def screen_on(self):
self.jsonrpc.wakeUp()
| Can a long_press() for key codes be implemented?
- Phone model: No
- uiautomator2 version: 2.16.23
- Phone screenshot: No
- Relevant logs (Python console error output, full adb logcat output, atx-agent.log)
- Ideally, include code that can reproduce the problem.
Background: I need to simulate a long press of the remote control's OK key on an Android TV, but there is no way to do it. Calling long_click() on that element has no effect either.
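A minimal sketch of the kind of helper I have in mind, going through the existing shell interface (assuming a connected device; KEYCODE_DPAD_CENTER is typically the remote's OK key):
```python
import uiautomator2 as u2

d = u2.connect()  # connect to the Android TV device

# Emulate a long press of the remote's OK key via the shell,
# since long_click() on the focused element has no effect here.
d.shell("input keyevent --longpress KEYCODE_DPAD_CENTER")
```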
| For now you can use `adb shell input keyevent --longpress VOLUME_UP` as a substitute | 2024-05-23T09:11:22 | 0.0 | [] | [] |
||
zms-publishing/ZMS | zms-publishing__ZMS-35 | a1b350f02e869d06c416bcd3020908b3ee29fae0 | diff --git a/Products/zms/ZMSMetaobjManager.py b/Products/zms/ZMSMetaobjManager.py
index 7721b201a..e91cbc9c6 100644
--- a/Products/zms/ZMSMetaobjManager.py
+++ b/Products/zms/ZMSMetaobjManager.py
@@ -875,7 +875,7 @@ def setMetaobjAttr(self, id, oldId, newId, newName='', newMandatory=0, newMultil
mapTypes = {'method':'DTML Method','py':'Script (Python)','zpt':'Page Template'}
message = ''
if newType in ['interface']:
- newType = standard.dt_executable(self, newCustom)
+ newType = standard.dt_executable(self, standard.pystr(newCustom, encoding='utf-8', errors='replace'))
if not newType:
newType = 'method'
newName = '%s: %s'%(newId, newType)
diff --git a/Products/zms/ZMSRepositoryManager.py b/Products/zms/ZMSRepositoryManager.py
index f118001b6..636b21f6a 100644
--- a/Products/zms/ZMSRepositoryManager.py
+++ b/Products/zms/ZMSRepositoryManager.py
@@ -217,28 +217,31 @@ def getDiffs(self, provider, ignore=True):
# if there are no references in model
continue
l = local.get(filename, {})
+ l_data = l.get('data')
r = remote.get(filename, {})
- if isinstance(l.get('data'), bytes):
+ r_data = r.get('data')
+ # Check whether any bytes data are decodeable as utf-8 text
+ if isinstance(l_data, bytes):
try:
- l['data'] = l['data'].decode('utf-8')
+ l['data'] = l_data.decode('utf-8')
except: # data is no text, but image etc.
pass
- if isinstance(r.get('data'), bytes):
+ if isinstance(r_data, bytes):
try:
- r['data'] = r['data'].decode('utf-8')
+ r['data'] = r_data.decode('utf-8')
except:
pass
- # Normalize Windows CR+LF line break to Unix LF in string objects
+ # If text then normalize Windows CR+LF line break to Unix LF
if isinstance(l.get('data'), str):
- l['data'] = l['data'].replace('\r\n','\n')
+ l['data'] = l['data'].replace('\r','')
if isinstance(r.get('data'), str):
- r['data'] = r['data'].replace('\r\n','\n')
+ r['data'] = r['data'].replace('\r','')
+ # Only if text is not equal add to diff list
if l.get('data') != r.get('data'):
- data = l.get('data', r.get('data'))
- try:
- mt, enc = standard.guess_content_type(filename.split('/')[-1], data.encode('utf-8'))
- except:
- mt, enc = standard.guess_content_type(filename.split('/')[-1], data)
+ data = l_data or r_data
+ if isinstance(data, str):
+ data = data.encode('utf-8')
+ mt, enc = standard.guess_content_type(filename.split('/')[-1], data)
diff.append((filename, mt, l.get('id', r.get('id', '?')), l, r))
return diff
diff --git a/Products/zms/conf/metaobj_manager/com.zms.index/__init__.py b/Products/zms/conf/metaobj_manager/com.zms.index/__init__.py
index 4e7cb5642..a499994b7 100644
--- a/Products/zms/conf/metaobj_manager/com.zms.index/__init__.py
+++ b/Products/zms/conf/metaobj_manager/com.zms.index/__init__.py
@@ -16,7 +16,7 @@ class com_zms_index:
package = ""
# Revision
- revision = "2.1.2"
+ revision = "2.1.3"
# Type
type = "ZMSPackage"
diff --git a/Products/zms/standard.py b/Products/zms/standard.py
index 3ef992194..4a759c481 100644
--- a/Products/zms/standard.py
+++ b/Products/zms/standard.py
@@ -67,12 +67,12 @@ def is_bytes(v):
security.declarePublic('pystr')
pystr_ = str
-def pystr(v, encoding='utf-8', errors='ignore'):
- if isinstance(v,bytes):
- v = v.decode(encoding)
- elif not isinstance(v,str):
+def pystr(v, encoding='utf-8', errors='strict'):
+ if isinstance(v, bytes):
+ v = v.decode(encoding, errors)
+ elif not isinstance(v, str):
try:
- v = str(v,encoding)
+ v = str(v, encoding, errors)
except:
v = str(v)
return v
diff --git a/Products/zms/zopeutil.py b/Products/zms/zopeutil.py
index 0942eac33..1b0f0632e 100644
--- a/Products/zms/zopeutil.py
+++ b/Products/zms/zopeutil.py
@@ -72,8 +72,14 @@ def addObject(container, meta_type, id, title, data, permissions={}):
Add Zope-object to container.
"""
if meta_type == 'DTML Document':
+ if not isinstance(data, str):
+ # Enforce to utf-8 text
+ data = standard.pystr(data, encoding='utf-8', errors='replace').encode('utf-8')
addDTMLDocument( container, id, title, data)
elif meta_type == 'DTML Method':
+ if not isinstance(data, str):
+ # Enforce to utf-8 text
+ data = standard.pystr(data, encoding='utf-8', errors='replace').encode('utf-8')
addDTMLMethod( container, id, title, data)
elif meta_type == 'External Method':
addExternalMethod( container, id, title, data)
@@ -84,6 +90,9 @@ def addObject(container, meta_type, id, title, data, permissions={}):
data = data.encode('utf-8')
addImage( container, id, title, data)
elif meta_type == 'Page Template':
+ if not isinstance(data, str):
+ # Enforce to utf-8 text
+ data = standard.pystr(data, encoding='utf-8', errors='replace').encode('utf-8')
addPageTemplate( container, id, title, data)
elif meta_type == 'Script (Python)':
addPythonScript( container, id, title, data)
| ZMSRepoManager.getDiffs fails
The latest changes https://github.com/zms-publishing/ZMS/commit/9d8b111cb2cc58ac21340de2c5b6d5acb709d4c7 and https://github.com/zms-publishing/ZMS/commit/a82bc5e4f6b2d125b165035053536c24b87936be result in
```
Module Products.zms.ZMSRepositoryManager, line 233, in getDiffs
TypeError: a bytes-like object is required, not 'str'
- Expression: "s python:[(x,here.getDiffs(x,here.get_ignore_orphans())) for x in here.getRepositoryProviders("
- Filename: manage_main_diff
- Location: (line 9: col 14)
- Expression: "python:here.manage_main_diff(here,request)"
- Filename: manage_main
- Location: (line 93: col 36)
```
Many thanks for the hint. The reported error occurred only when the content model contains binary data such as images, which cannot be decoded to a string. The background of the earlier change was a ZMS setup running on Windows, where code modifications resulted in changed line breaks. These could be considered false-positive diffs and should be ignored.
The latest change https://github.com/zms-publishing/ZMS/commit/c096f41ae5af5f38ec02d03f6e995f6542dee1ab breaks in a legacy environment and results in
```
File "/Workspace/Products/zms/ZMSRepositoryManager.py", line 222, in getDiffs
print(l['data'].decode('utf-8'))
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb3 in position 10: invalid start byte
```
It is caused by an existing GIF resource in the content model.
The following patch seems to solve it:
```
if isinstance(l.get('data'), bytes):
- l['data'] = l['data'].decode('utf-8')
+ l['data'] = l['data'].decode('utf-8', errors="replace")
if isinstance(r.get('data'), bytes):
- r['data'] = r['data'].decode('utf-8')
+ r['data'] = r['data'].decode('utf-8', errors="replace")
# Normalize Windows CR+LF line break to Unix LF in string objects
if isinstance(l.get('data'), str):
l['data'] = l['data'].replace('\r\n','\n')
```
According to my tests ('replace' can still result in UnicodeDecodeError) and similar discussions (https://stackoverflow.com/questions/34869889/what-is-the-proper-way-to-determine-if-an-object-is-a-bytes-like-object-in-pytho) I would stick with the duck typing:

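In text form, the duck-typing check amounts to something like this (a minimal sketch with toy bytes, not the actual ZMS objects):
```python
gif_bytes = b"GIF89a\x01\x00\x01\x00\x80\xb3"  # toy header containing a non-UTF-8 byte

try:
    data = gif_bytes.decode("utf-8")   # real text decodes fine
except UnicodeDecodeError:
    data = gif_bytes                   # binary data (images etc.) is kept as bytes

print(type(data))                      # <class 'bytes'> for the GIF case
```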
The opposite comparison (binary in FS and text in ZODB) detects different files (but does not show the diff in detail, good enough for syncing)

Many thanks again! | 2021-12-29T19:17:33 | 0.0 | [] | [] |
||
nikulpatel3141/ETF-Scraper | nikulpatel3141__ETF-Scraper-2 | 616ccb5acda90fc2cee44d2fe49d42bd5c440936 | diff --git a/src/etf_scraper/scrapers.py b/src/etf_scraper/scrapers.py
index db458fe..9d1c441 100644
--- a/src/etf_scraper/scrapers.py
+++ b/src/etf_scraper/scrapers.py
@@ -1,5 +1,5 @@
import logging
-from typing import Union
+from typing import Union, List
from urllib.parse import urljoin
from io import StringIO
from datetime import date, datetime
@@ -60,7 +60,7 @@ class ISharesListings(ProviderListings):
exp_cols = ["productPageUrl", "localExchangeTicker"] # bare minimum to be returned
holding_col_mapping = {
- "Ticker": "ticker",
+ "Ticker": "ticker", # for equity funds
"Name": "name",
"Sector": "sector",
"Asset Class": "asset_class",
@@ -74,11 +74,54 @@ class ISharesListings(ProviderListings):
"Currency": "currency",
"FX Rate": "fx_rate",
"Market Currency": "market_currency",
+ "Par Value": "par_value", # for bond funds
+ "CUSIP": "cusip",
+ "ISIN": "isin",
+ "SEDOL": "sedol",
+ "Duration": "duration",
+ "YTM (%)": "yield_to_maturity",
+ "Maturity": "maturity",
+ "Coupon (%)": "coupon",
+ "Mod. Duration": "modified_duration",
+ "Yield to Call (%)": "yield_to_call",
+ "Yield to Worst (%)": "yield_to_worst",
+ "Real Duration": "real_duration",
+ "Real YTM (%)": "real_ytm",
+ "Accrual Date": "accrual_date",
+ "Effective Date": "effective_date",
}
- exp_holding_cols = ["Ticker", "Shares", "Market Value"]
+
+ holdings_string_cols = ["ticker", "cusip", "isin", "sedol"]
+ holdings_numeric_cols = [
+ "amount",
+ "weight",
+ "market_value",
+ "price",
+ "notional_value",
+ "duration",
+ "yield_to_maturity",
+ "coupon",
+ "modified_duration",
+ "yield_to_call",
+ "yield_to_worst",
+ "real_duration",
+ "real_ytm",
+ ]
+ holdings_date_cols = ["accrual_date", "effective_date"]
_fund_type_map = {"etf": "ETF", "mutualfund": "MF"}
+ @classmethod
+ def exp_holding_cols(cls, asset_class: str) -> List[str]:
+ if asset_class in ("Equity", "Real Estate", "Commodity"):
+ return ["Ticker", "Shares", "Market Value"]
+ elif asset_class == "Fixed Income":
+ return ["CUSIP", "Market Value"]
+ else:
+ raise NotImplementedError(
+ f"iShares holdings scraper not implemented for {asset_class}"
+ )
+
@classmethod
def retrieve_listings(cls):
listing_url = urljoin(cls.host, cls.listing_endpoint)
@@ -122,7 +165,11 @@ def parse_net(x):
return resp_df_.reset_index(drop=True)
@classmethod
- def _parse_holdings_resp(cls, resp_content):
+ def _parse_holdings_date(cls, date_str: str) -> date:
+ return datetime.strptime(date_str, "%b %d, %Y").date() # eg "Jan 03, 2022"
+
+ @classmethod
+ def _parse_holdings_resp(cls, resp_content, asset_class="Equity"):
header_rows = 9
raw_data = StringIO(resp_content)
summary_data = [raw_data.readline().rstrip("\n") for _ in range(header_rows)]
@@ -148,9 +195,7 @@ def _parse_holdings_resp(cls, resp_content):
logger.debug(f"Found reported holdings date string {as_of_date}")
logger.debug("Attempting to parse holdings data")
- as_of_date = datetime.strptime(
- as_of_date, "%b %d, %Y"
- ).date() # eg "Jan 03, 2022"
+ as_of_date = cls._parse_holdings_date(as_of_date)
if summary_data[-1] != "\xa0":
logger.warning(
@@ -179,20 +224,40 @@ def _parse_holdings_resp(cls, resp_content):
na_values="-",
)
- check_missing_cols(cls.exp_holding_cols, holdings_df.columns, raise_error=True)
-
+ check_missing_cols(
+ cls.exp_holding_cols(asset_class), holdings_df.columns, raise_error=True
+ )
holdings_df = holdings_df.rename(columns=cls.holding_col_mapping)
+
+ def _parse_holdings_date_(x):
+ try:
+ return cls._parse_holdings_date(x)
+ except:
+ return pd.NaT
+
+ for col in holdings_df:
+ if col in cls.holdings_date_cols:
+ holdings_df.loc[:, col] = holdings_df[col].apply(_parse_holdings_date_)
+ elif col in cls.holdings_string_cols:
+ holdings_df.loc[:, col] = holdings_df[col].astype(str)
+
holdings_df = holdings_df[~holdings_df["weight"].isna()]
- strip_str_cols(holdings_df, ["ticker"])
+ strip_str_cols(
+ holdings_df, [k for k in cls.holdings_string_cols if k in holdings_df]
+ )
set_numeric_cols(
- holdings_df, ["amount", "weight", "market_value", "price", "notional_value"]
+ holdings_df, [k for k in cls.holdings_numeric_cols if k in holdings_df]
)
return holdings_df, as_of_date
@classmethod
def retrieve_holdings(
- cls, ticker: str, product_url: str, holdings_date: Union[date, None]
+ cls,
+ ticker: str,
+ product_url: str,
+ holdings_date: Union[date, None],
+ asset_class: str = "Equity",
):
"""Query for IShares product holdings
Args:
@@ -215,7 +280,7 @@ def retrieve_holdings(
resp.raise_for_status()
holdings_df, as_of_date = cls._parse_holdings_resp(
- resp.content.decode(encoding="UTF-8-SIG")
+ resp.content.decode(encoding="UTF-8-SIG"), asset_class
)
if holdings_date:
@@ -231,7 +296,10 @@ def retrieve_holdings(
def _retrieve_holdings(cls, sec_listing: SecurityListing, holdings_date: date):
_check_exp_provider(sec_listing.provider, cls.provider, cls.__name__)
return cls.retrieve_holdings(
- sec_listing.ticker, sec_listing.product_url, holdings_date
+ sec_listing.ticker,
+ sec_listing.product_url,
+ holdings_date,
+ sec_listing.asset_class,
)
| Fix for iShares credit funds
Currently only works for Equity funds. Credit funds have a different response file, but this should be an easy fix
```python
>>> holdings_df = etf_scraper.query_holdings("AGG", holdings_date)
ERROR:etf_scraper.utils:Missing expectd columns ['Ticker', 'Shares']
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.10/site-packages/etf_scraper/api.py", line 117, in query_holdings
return scraper._retrieve_holdings(sec_listing, holdings_date)
File "/usr/local/lib/python3.10/site-packages/etf_scraper/scrapers.py", line 209, in _retrieve_holdings
return cls.retrieve_holdings(
File "/usr/local/lib/python3.10/site-packages/etf_scraper/scrapers.py", line 193, in retrieve_holdings
holdings_df, as_of_date = cls._parse_holdings_resp(
File "/usr/local/lib/python3.10/site-packages/etf_scraper/scrapers.py", line 157, in _parse_holdings_resp
check_missing_cols(cls.exp_holding_cols, holdings_df.columns, raise_error=True)
File "/usr/local/lib/python3.10/site-packages/etf_scraper/utils.py", line 23, in check_missing_cols
raise ValueError(
ValueError: Missing required columns from response. Got Index(['Name', 'Sector', 'Asset Class', 'Market Value', 'Weight (%)',
'Notional Value', 'Par Value', 'CUSIP', 'ISIN', 'SEDOL', 'Location',
'Exchange', 'Currency', 'Duration', 'YTM (%)', 'FX Rate', 'Maturity',
'Coupon (%)', 'Mod. Duration', 'Yield to Call (%)',
'Yield to Worst (%)', 'Real Duration', 'Real YTM (%)',
'Market Currency', 'Accrual Date', 'Effective Date'],
dtype='object')Was expecting at least all of ['Ticker', 'Shares', 'Market Value']
```
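A sketch of one way the column check could branch on asset class (a hypothetical helper; the bond-fund column names are taken from the traceback above):
```python
from typing import List

def expected_holding_cols(asset_class: str) -> List[str]:
    if asset_class == "Equity":
        return ["Ticker", "Shares", "Market Value"]
    if asset_class == "Fixed Income":
        # credit/bond funds report CUSIP and Par Value instead of Ticker/Shares
        return ["CUSIP", "Market Value"]
    raise NotImplementedError(f"No column mapping for asset class {asset_class}")
```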
| 2023-01-25T22:06:28 | 0.0 | [] | [] |
|||
juglab/cryoCARE_pip | juglab__cryoCARE_pip-18 | 8502b75531d4cff3dbb6b2b361e89ddb9a0d0771 | diff --git a/README.md b/README.md
index a13d966..d3ef700 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,48 @@
# cryoCARE
-This package is a memory efficient implementation of [cryoCARE](https://github.com/juglab/cryoCARE_T2T).
+
+This package is a memory efficient implementation of [cryoCARE](https://github.com/juglab/cryoCARE_T2T).
+
This setup trains a denoising U-Net for tomographic reconstruction according to the [Noise2Noise](https://arxiv.org/pdf/1803.04189.pdf) training paradigm.
-Therefor the user has to provide two tomograms of the same sample.
+Therefore the user has to provide two tomograms of the same sample.
The simplest way to achieve this is with direct-detector movie-frames.
-These movie-frames can be split in two halves (e.g. with MotionCor2 `-SplitSum 1` or with IMOD `alignframes -debug 10000`) from which two identical, up to random noise, tomograms can be reconsturcted.
+
+You can use Warp to generate two reconstructed tomograms based on the even/odd frames. Alternatively, the movie-frames can be split in two halves (e.g. with MotionCor2 `-SplitSum 1` or with IMOD `alignframes -debug 10000`) from which two identical, up to random noise, tomograms can be reconstructed.
+
These two (even and odd) tomograms can be used as input to this cryoCARE implementation.
+## Changelog
+
+### Version 0.2
+
+* `cyroCARE_train` produces a compressed and more portable model. This model can be copied and shared with others without relying on a certain folder structure.
+* `cryoCARE_predict` supports to predict multiple tomograms in one run. Streamlined configuration with respect to the changes of `cryoCARE_train`.
+* Streamlined installation instructions
+* Minor changes/ fixed couple of bugs:
+ * Proper padding of tomograms to avoid black frames in the denoised tomograms
+ * Fix computation of validation cut off for small tomograms
+ * Fix `cryoCARE_predict` if no tiling happens
+
+## Installation
+
+__Note:__ We assume that you have [miniconda](https://docs.conda.io/en/latest/miniconda.html) installed.
+
+First you need to create a conda environment.
+
+### For CUDA 11:
+```
+conda create -n cryocare_11 python=3.8 cudatoolkit=11.0 cudnn=8.0 -c conda-forge
+conda activate cryocare_11
+pip install tensorflow==2.4
+pip install cryoCARE
+```
+
+### For CUDA 10:
+```
+conda create -n cryocare -c conda-forge -c anaconda python=3 keras-gpu=2.3.1
+conda activate cryocare
+pip install cryoCARE
+```
+
## Manual
cryoCARE uses `.json` configuration files and is run in three steps:
@@ -39,7 +76,7 @@ Create an empty file called `train_data_config.json`, copy-paste the following t
* `"num_slices"`: Number of sub-volumes extracted per tomograms.
* `"tilt_axis"`: Tilt-axis of the tomograms. We split the tomogram along this axis to extract train- and validation data separately.
* `"n_normalization_samples"`: Number of sub-volumes extracted per tomograms, which are used to compute `mean` and `standard deviation` for normalization.
-* `"path"`: Reference path for the project. All output will be saved here. In this step, the training and validation data is saved here.
+* `"path"`: The training and validation data are saved here.
#### Run Training Data Preparation:
After installation of the package we have access to built in Python-scripts which we can call.
@@ -73,56 +110,38 @@ Create an empty file called `train_config.json`, copy-paste the following templa
* `"unet_n_first"`: Number of initial feature channels.
* `"learning_rate"`: Learning rate of the model training.
* `"model_name"`: Name of the model.
-* `"path"`: Needs to be the same as in step 1. In this step the model is saved here.
+* `"path"`: Output path for the model.
#### Run Training:
To run the training we run the following command:
`cryoCARE_train.py --conf train_config.json`
+You will find a `.tar.gz` file in the directory you specified as `path`. This your model an will be used in the next step.
+
### 3. Prediction
Create an empty file called `predict_config.json`, copy-paste the following template and fill it in.
```
{
- "model_name": "model_name",
- "path": "./",
- "even": "/path/to/even.rec",
- "odd": "/path/to/odd.rec",
+ "path": "path/to/your/model.tar.gz",
+ "even": "/path/to/even/tomos/",
+ "odd": "/path/to/odd/tomos/",
"n_tiles": [1, 1, 1],
- "output_name": "denoised.rec"
+ "output": "/path/to/output/folder/",
+ "overwrite": false
}
```
-__Note:__ Currently only a single tomogram can be denoised at a time i.e. if you want to denoise multiple tomograms you have to run this step for each tomo individually.
#### Parameters:
-* `"model_name"`: Name of the trained model which should be used.
-* `"path"`: Path to the parent directory where the model is stored. This corresponds to `"path"` in the `train_config.json`.
-* `"even"`: Path to the even tomogram.
-* `"odd"`: Path to the odd tomogram.
+* `"path"`: Path to your model file.
+* `"even"`: Path to directory with even tomograms or a specific even tomogram.
+* `"odd"`: Path to directory with odd tomograms or a specific odd tomogram.
* `"n_tiles"`: Initial tiles per dimension. Gets increased if the tiles do not fit on the GPU.
-* `"output_name"`: Name of the denoised tomogram.
+* `"output"`: Path where the denoised tomograms will be written.
#### Run Prediction:
To run the training we run the following command:
`cryoCARE_predict.py --conf predict_config.json`
-## Installation
-Create the following conda environment with:
-```
-conda create -n cryocare -c conda-forge -c anaconda python=3 keras-gpu=2.3.1
-```
-
-Then activate it with:
-```
-conda activate cryocare
-```
-
-Then you can install cryoCARE with pip:
-```
-pip install cryoCARE
-```
-
-__Note:__ I would recommend to use [miniconda](https://docs.conda.io/en/latest/miniconda.html) or [singularity](https://sylabs.io/guides/3.0/user-guide/quick_start.html) to manage environments and versions.
-
## How to Cite
```
@inproceedings{buchholz2019cryo,
diff --git a/cryocare/internals/CryoCARE.py b/cryocare/internals/CryoCARE.py
index a4a9bbd..ec52d95 100644
--- a/cryocare/internals/CryoCARE.py
+++ b/cryocare/internals/CryoCARE.py
@@ -3,7 +3,7 @@
from csbdeep.models import CARE
from csbdeep.utils import _raise, axes_check_and_normalize, axes_dict
import warnings
-
+import logging
import numpy as np
import tensorflow as tf
@@ -29,6 +29,7 @@ def train(self, train_dataset, val_dataset, epochs=None, steps_per_epoch=None):
``History`` object
See `Keras training history <https://keras.io/models/model/#fit>`_.
"""
+ logging.getLogger("tensorflow").setLevel(logging.ERROR)
axes = axes_check_and_normalize('S' + self.config.axes, len(train_dataset.element_spec[0].shape) + 1)
ax = axes_dict(axes)
@@ -254,6 +255,10 @@ def predict_tiled(keras_model, even, odd, output, s_src_out, s_dst_out, mean, st
pred = pred[src]
if pbar is not None:
pbar.update()
+
+ if output.shape == pred.shape:
+ output[:] = pred[:]
+
return pred
###
diff --git a/cryocare/internals/CryoCAREDataModule.py b/cryocare/internals/CryoCAREDataModule.py
index 37d6e5d..750a238 100644
--- a/cryocare/internals/CryoCAREDataModule.py
+++ b/cryocare/internals/CryoCAREDataModule.py
@@ -212,8 +212,8 @@ def __compute_extraction_shapes__(self, even_path, odd_path, tilt_axis_index, sa
assert even.data.shape[2] > 2 * sample_shape[2]
val_cut_off = int(even.data.shape[tilt_axis_index] * validation_fraction)
- if even.data.shape[tilt_axis_index] - val_cut_off < sample_shape[tilt_axis_index]:
- val_cut_off = even.data.shape[tilt_axis_index] - sample_shape[tilt_axis_index]
+ if ((even.data.shape[tilt_axis_index] - val_cut_off) < sample_shape[tilt_axis_index]) or val_cut_off < sample_shape[tilt_axis_index]:
+ val_cut_off = even.data.shape[tilt_axis_index] - sample_shape[tilt_axis_index] - 1
extraction_shape_train = [[0, even.data.shape[0]], [0, even.data.shape[1]], [0, even.data.shape[2]]]
extraction_shape_val = [[0, even.data.shape[0]], [0, even.data.shape[1]], [0, even.data.shape[2]]]
diff --git a/cryocare/scripts/cryoCARE_extract_train_data.py b/cryocare/scripts/cryoCARE_extract_train_data.py
index b6f67c5..3a9b65b 100644
--- a/cryocare/scripts/cryoCARE_extract_train_data.py
+++ b/cryocare/scripts/cryoCARE_extract_train_data.py
@@ -2,6 +2,8 @@
import argparse
import json
import warnings
+import os
+import sys
from cryocare.internals.CryoCAREDataModule import CryoCARE_DataModule
@@ -25,6 +27,16 @@ def main():
dm.setup(config['odd'], config['even'], n_samples_per_tomo=config['num_slices'],
validation_fraction=(1.0 - config['split']), sample_shape=config['patch_shape'],
tilt_axis=config['tilt_axis'], n_normalization_samples=config['n_normalization_samples'])
+
+ try:
+ os.makedirs(config['path'])
+ except OSError:
+ if 'overwrite' in config and config['overwrite']:
+ os.makedirs(config['path'], exist_ok=True)
+ else:
+ print("Output directory already exists. Please choose a new output directory or set 'overwrite' to 'true' in your configuration file.")
+ sys.exit(1)
+
dm.save(config['path'])
diff --git a/cryocare/scripts/cryoCARE_predict.py b/cryocare/scripts/cryoCARE_predict.py
index 2392900..46fe372 100644
--- a/cryocare/scripts/cryoCARE_predict.py
+++ b/cryocare/scripts/cryoCARE_predict.py
@@ -2,43 +2,64 @@
import argparse
import json
from os.path import join
+import os
+import tarfile
+import tempfile
import datetime
import mrcfile
import numpy as np
+import sys
+from typing import Tuple
from cryocare.internals.CryoCARE import CryoCARE
from cryocare.internals.CryoCAREDataModule import CryoCARE_DataModule
import psutil
+def pad(volume: np.array, div_by: Tuple) -> np.array:
+ pads = []
+ for axis_index, axis_size in enumerate(volume.shape):
+ pad_by = axis_size%div_by[axis_index]
+ pads.append([0,pad_by])
+ volume_padded = np.pad(volume, pads, mode='mean')
-def main():
- parser = argparse.ArgumentParser(description='Run cryoCARE prediction.')
- parser.add_argument('--conf')
+ return volume_padded
- args = parser.parse_args()
- with open(args.conf, 'r') as f:
- config = json.load(f)
- dm = CryoCARE_DataModule()
- dm.load(config['path'])
+def denoise(config: dict, mean: float, std: float, even: str, odd: str, output_file: str):
model = CryoCARE(None, config['model_name'], basedir=config['path'])
- even = mrcfile.mmap(config['even'], mode='r', permissive=True)
- odd = mrcfile.mmap(config['odd'], mode='r', permissive=True)
- denoised = mrcfile.new_mmap(join(config['path'], config['output_name']), even.data.shape, mrc_mode=2,
- overwrite=True)
+ even = mrcfile.mmap(even, mode='r', permissive=True)
+ odd = mrcfile.mmap(odd, mode='r', permissive=True)
+ shape_before_pad = even.data.shape
+ even_vol = even.data
+ odd_vol = odd.data
+ even_vol = even_vol
+ odd_vol = odd_vol
+
+ div_by = model._axes_div_by('XYZ')
- even.data.shape += (1,)
- odd.data.shape += (1,)
- denoised.data.shape += (1,)
+ even_vol = pad(even_vol,div_by=div_by)
+ odd_vol = pad(odd_vol, div_by=div_by)
- mean, std = dm.train_dataset.mean, dm.train_dataset.std
+ denoised = np.zeros(even_vol.shape)
- model.predict(even.data, odd.data, denoised.data, axes='ZYXC', normalizer=None, mean=mean, std=std,
+ even_vol.shape += (1,)
+ odd_vol.shape += (1,)
+ denoised.shape += (1,)
+
+ model.predict(even_vol, odd_vol, denoised, axes='ZYXC', normalizer=None, mean=mean, std=std,
n_tiles=config['n_tiles'] + [1, ])
+ denoised = denoised[slice(0, shape_before_pad[0]), slice(0, shape_before_pad[1]), slice(0, shape_before_pad[2])]
+ mrc = mrcfile.new_mmap(output_file, denoised.shape, mrc_mode=2, overwrite=True)
+ mrc.data[:] = denoised
+
+
+
+
+
for l in even.header.dtype.names:
if l == 'label':
new_label = np.concatenate((even.header[l][1:-1], np.array([
@@ -46,10 +67,71 @@ def main():
"%d-%b-%y %H:%M:%S") + " "]),
np.array([''])))
print(new_label)
- denoised.header[l] = new_label
+ mrc.header[l] = new_label
else:
- denoised.header[l] = even.header[l]
- denoised.header['mode'] = 2
+ mrc.header[l] = even.header[l]
+ mrc.header['mode'] = 2
+
+def main():
+ parser = argparse.ArgumentParser(description='Run cryoCARE prediction.')
+ parser.add_argument('--conf')
+
+ args = parser.parse_args()
+ with open(args.conf, 'r') as f:
+ config = json.load(f)
+
+ try:
+ os.makedirs(config['output'])
+ except OSError:
+ if 'overwrite' in config and config['overwrite']:
+ os.makedirs(config['output'], exist_ok=True)
+ else:
+ print("Output directory already exists. Please choose a new output directory or set 'overwrite' to 'true' in your configuration file.")
+ sys.exit(1)
+
+
+ if os.path.isfile(config['path']):
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ tar = tarfile.open(config['path'], "r:gz")
+ tar.extractall(tmpdirname)
+ tar.close()
+ config['model_name'] = os.listdir(tmpdirname)[0]
+ config['path'] = os.path.join(tmpdirname)
+ with open(os.path.join(tmpdirname,config['model_name'],"norm.json")) as f:
+ norm_data = json.load(f)
+ mean = norm_data["mean"]
+ std = norm_data["std"]
+
+
+
+ from glob import glob
+ if type(config['even']) is list:
+ all_even=tuple(config['even'])
+ all_odd=tuple(config['odd'])
+ elif os.path.isdir(config['even']) and os.path.isdir(config['odd']):
+ all_even = glob(os.path.join(config['even'],"*.mrc"))
+ all_odd = glob(os.path.join(config['odd'],"*.mrc"))
+ else:
+ all_even = [config['even']]
+ all_odd = [config['odd']]
+
+ for even,odd in zip(all_even,all_odd):
+ out_filename = os.path.join(config['output'], os.path.basename(even))
+ denoise(config, mean, std, even=even, odd=odd, output_file=out_filename)
+ else:
+ # Fall back to original cryoCARE implmentation
+ print("Your config is not in the format that cryoCARE >=0.2 requires. Fallback to cryCARE 0.1 format.")
+ if 'output_name' not in config or os.path.isfile(config['path']):
+ print("Invalid config format.")
+ sys.exit(1)
+
+ dm = CryoCARE_DataModule()
+ dm.load(config['path'])
+ mean, std = dm.train_dataset.mean, dm.train_dataset.std
+
+ denoise(config, mean, std, even=config['even'], odd=config['odd'], output_file=join(config['path'], config['output_name']))
+
+
if __name__ == "__main__":
diff --git a/cryocare/scripts/cryoCARE_train.py b/cryocare/scripts/cryoCARE_train.py
index 6070cae..52e3f3d 100644
--- a/cryocare/scripts/cryoCARE_train.py
+++ b/cryocare/scripts/cryoCARE_train.py
@@ -34,10 +34,26 @@ def main():
model = CryoCARE(net_conf, config['model_name'], basedir=config['path'])
history = model.train(dm.get_train_dataset(), dm.get_val_dataset())
+ mean, std = dm.train_dataset.mean, dm.train_dataset.std
- print(list(history.history.keys()))
with open(join(config['path'], config['model_name'], 'history.dat'), 'wb+') as f:
pickle.dump(history.history, f)
+
+ # Write norm to disk
+ norm = {
+ "mean": float(mean),
+ "std": float(std)
+ }
+ with open(join(config['path'], config['model_name'], 'norm.json'), 'w') as fp:
+ json.dump(norm, fp)
+
+ import tarfile
+ import os
+ with tarfile.open(join(config['path'], f"{config['model_name']}.tar.gz"), "w:gz") as tar:
+ tar.add(join(config['path'], config['model_name']), arcname=os.path.basename(join(config['path'], config['model_name'])))
+
+
+
if __name__ == "__main__":
main()
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 5506467..54fa5b6 100644
--- a/setup.py
+++ b/setup.py
@@ -5,9 +5,9 @@
setuptools.setup(
name="cryoCARE",
- version="0.1.1",
- author="Tim-Oliver Buchholz",
- author_email="[email protected]",
+ version="0.2",
+ author="Tim-Oliver Buchholz, Thorsten Wagner",
+ author_email="[email protected], [email protected]",
description="cryoCARE is a deep learning approach for cryo-TEM tomogram denoising.",
long_description=long_description,
long_description_content_type="text/markdown",
@@ -24,8 +24,7 @@
install_requires=[
"numpy",
"mrcfile",
- "keras>=2.1.2,<2.4.0",
- "csbdeep>=0.6.0,<0.7.0",
+ "csbdeep>=0.7.0,<0.8.0",
"psutil"
],
scripts=[
| Create path before saving training data to disk
The folder `config['path']` should be created before saving training data to disk. Otherwise it will crash.
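A minimal sketch of the guard meant here (hypothetical stand-in path, mirroring the existing `dm.save(config['path'])` call):
```python
import os

out_dir = "./train_data"             # stands in for config['path']
os.makedirs(out_dir, exist_ok=True)  # create the folder (and parents) before dm.save(out_dir)
```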
n_tiles [1,1,1] produces black volume
Hi,
when I run cryoCARE on my local setup, it typically crashes with an OOM error, then retries with n_tiles (1, 1, 2) and completes. The results look fine :-)
If I run the same cryoCARE setup on our cluster with RTX6000 GPUs, it only produces a black volume. However, if you set n_tiles to (1, 1, 2) in the prediction config, it works well.
So there seems to be a bug when running it on only a single tile. Any ideas @tibuch?
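For what it's worth, a minimal sketch of one way a single-tile run could come out black (an assumption on my side, not a confirmed diagnosis): if the prediction is only returned and never copied into the pre-allocated output volume, that volume stays all zeros.
```python
import numpy as np

output = np.zeros((4, 4, 4))        # pre-allocated volume that later gets written to disk

def predict_single_tile(even, odd):
    pred = (even + odd) / 2.0       # stand-in for the denoising network
    return pred                     # note: nothing assigns into `output` here

pred = predict_single_tile(np.ones((4, 4, 4)), np.ones((4, 4, 4)))
print(output.max())                 # 0.0, i.e. a "black" volume unless output[:] = pred is done
```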
Best,
Thorsten
| 2022-08-08T08:49:26 | 0.0 | [] | [] |
|||
TDAmeritrade/stumpy | TDAmeritrade__stumpy-888 | f349385e2903e53edb1476b6eebf3ec9922da03b | diff --git a/stumpy/core.py b/stumpy/core.py
index d9f119364..41e5dff28 100644
--- a/stumpy/core.py
+++ b/stumpy/core.py
@@ -3956,7 +3956,6 @@ def _mdl(disc_subseqs, disc_neighbors, S, n_bit=8):
@njit(
# "(i8, i8, f8[:, :], f8[:], i8, f8[:, :], i8[:, :], f8)",
- parallel=True,
fastmath={"nsz", "arcp", "contract", "afn", "reassoc"},
)
def _compute_multi_PI(d, idx, D, D_prime, range_start, P, I, p=2.0):
@@ -3999,7 +3998,7 @@ def _compute_multi_PI(d, idx, D, D_prime, range_start, P, I, p=2.0):
"""
D_prime[:] = 0.0
for i in range(d):
- D_prime = D_prime + np.power(D[i], 1.0 / p)
+ D_prime[:] = D_prime + np.power(D[i], 1.0 / p)
min_index = np.argmin(D_prime)
pos = idx - range_start
| numba JIT-compiled func `core._compute_multi_PI` is not parallel?
The function `core._compute_multi_PI` seems to be parallel according to its decorator:
https://github.com/TDAmeritrade/stumpy/blob/76cb9806dec2d32f80ff20a5a3c484c34b7a54bf/stumpy/core.py#L3955-L3960
However, it uses `range` instead of `prange`:
https://github.com/TDAmeritrade/stumpy/blob/76cb9806dec2d32f80ff20a5a3c484c34b7a54bf/stumpy/core.py#L3997-L3998
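For reference, a toy example of the distinction (standalone numba code, not stumpy internals): with `parallel=True`, only `prange` loops are actually parallelized, while a plain `range` loop runs serially and just pays the extra compilation overhead.
```python
import numpy as np
from numba import njit, prange

@njit(parallel=True)
def sum_parallel(a):
    total = 0.0
    for i in prange(a.shape[0]):  # parallelized reduction across threads
        total += a[i]
    return total

@njit(parallel=True)
def sum_serial(a):
    total = 0.0
    for i in range(a.shape[0]):   # stays serial even though parallel=True is set
        total += a[i]
    return total

x = np.random.rand(1_000_000)
print(sum_parallel(x), sum_serial(x))
```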
| I'm guessing that, at some point, it was using `prange` but we found that it was inefficient and we moved `prange` to the function that called `compute_multi_PI`. What happens when you remove `parallel=True`? Does it affect the performance? If not, it can be removed. | 2023-07-23T03:56:58 | 0.0 | [] | [] |
||
feagi/feagi | feagi__feagi-47 | 2012bad7f6951ddc206bc86422e3e46cd1f64749 | diff --git a/3rd_party/ros/Micro-ROS/setup_micro_ros.sh b/3rd_party/ros/Micro-ROS/setup_micro_ros.sh
deleted file mode 100644
index b92fab3b5..000000000
--- a/3rd_party/ros/Micro-ROS/setup_micro_ros.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-#Install micro-ros
-cd ~
-mkdir micro_ros_arduino
-cd ~/micro_ros_arduino
-source /opt/ros/$ROS_DISTRO/setup.bash
-git clone -b foxy https://github.com/micro-ROS/micro_ros_setup.git src/micro_ros_setup
-rosdep update && rosdep install --from-path src --ignore-src -y
-cd ~/micro_ros_arduino || exit
-colcon build
-source install/local_setup.bash
-ros2 run micro_ros_setup create_agent_ws.sh
-ros2 run micro_ros_setup build_agent.sh
-source install/local_setup.sh
-
-
-#Install Arduino CLI
-cd ~
-pip3 install pyserial
-git clone https://github.com/arduino/arduino-cli.git
-cd arduino-cli/
-export PATH=$PATH:/root/$USER/arduino-cli/bin
-./install.sh
-export PATH=$PATH:/root/$USER/arduino-cli/bin
-arduino-cli config init
-arduino-cli core update-index
-arduino-cli core install arduino:samd
-arduino-cli core install arduino:sam
-arduino-cli core install arduino:avr
-mkdir micro-ros_publisher
-cd micro-ros_publisher
-cp ~/micro-ros_publisher.ino ~/arduino-cli/micro-ros_publisher/
-cd ~/.arduino15/packages/arduino/hardware/sam/1.6.12/
-curl https://raw.githubusercontent.com/micro-ROS/micro_ros_arduino/foxy/extras/patching_boards/platform_arduinocore_sam.txt > platform.txt
\ No newline at end of file
diff --git a/3rd_party/ros/Micro-ROS/Dockerfile b/3rd_party/ros/Micro-ROS_DEPRECATED/Dockerfile
similarity index 85%
rename from 3rd_party/ros/Micro-ROS/Dockerfile
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/Dockerfile
index e51d1178a..c42c142a7 100644
--- a/3rd_party/ros/Micro-ROS/Dockerfile
+++ b/3rd_party/ros/Micro-ROS_DEPRECATED/Dockerfile
@@ -5,7 +5,7 @@ SHELL ["/bin/bash", "-c"]
# install tools
RUN groupadd -r micro_ros && \
apt-get update && \
- apt-get install -y --no-install-recommends python3-pip
+ apt-get install -y --no-install-recommends python3-pip curl
# set up workspaces
WORKDIR /root/
diff --git a/3rd_party/ros/Micro-ROS/HC_SR04_Foxy.py b/3rd_party/ros/Micro-ROS_DEPRECATED/HC_SR04_Foxy.py
similarity index 59%
rename from 3rd_party/ros/Micro-ROS/HC_SR04_Foxy.py
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/HC_SR04_Foxy.py
index 35fe2d1ef..499238769 100644
--- a/3rd_party/ros/Micro-ROS/HC_SR04_Foxy.py
+++ b/3rd_party/ros/Micro-ROS_DEPRECATED/HC_SR04_Foxy.py
@@ -2,6 +2,7 @@
import serial
import time
import std_msgs
+import zmq
from example_interfaces.msg import Int64
from rclpy.node import Node
@@ -9,7 +10,7 @@
from rclpy.qos import qos_profile_sensor_data # this is required to have a full data
ser = serial.Serial(
- port="/dev/ttyACM0",
+ port="/dev/ttyUSB0",
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
@@ -19,6 +20,11 @@
print("Found the ardiuno board.")
print("Creating the /scan topic..")
+socket_address = "tcp://0.0.0.0:2000"
+context = zmq.Context()
+socket = context.socket(zmq.PUB)
+socket.bind(socket_address)
+
class MinimalPublisher(Node):
def __init__(self):
@@ -28,19 +34,13 @@ def __init__(self):
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
- def timer_callback(self): # this is the part where we need to get it keep running
- check = ser.readline()
- if check == ' ': # this if statement is to skip string id. It doesn't seem like it works
- print("Skipped the ' '") # in #44 line, it kept recieving a string ' '
- else:
- sensorvalue = float(ser.readline()) # posts the value
- msg = Int64()
- msg.data= int(sensorvalue)
- print(msg)
- self.get_logger().info("distance: {}".format(sensorvalue))
- print(type(msg)) # this is to verify the type of the value. It should be float only
- self.publisher_.publish(msg) # this is to publish the data to topic 'scann'. It can change to 'scan' in #34 line
- self.i += 1
+ def timer_callback(self):
+ bytes = ser.readline()
+ data = bytes.decode(encoding="utf-8").strip("\r\n")
+ if data is not None or data is not '':
+ distance = int(data)
+ self.get_logger().info("MESSAGE: {}".format(distance))
+ socket.send_pyobj(distance)
def main(args=None):
@@ -55,7 +55,7 @@ def main(args=None):
# when the garbage collector destroys the node object)
minimal_publisher.destroy_node()
rclpy.shutdown()
- serialcomm.close()
+ # serialcomm.close()
if __name__ == '__main__':
diff --git a/3rd_party/ros/Micro-ROS/Micro-ROS-Arduino.md b/3rd_party/ros/Micro-ROS_DEPRECATED/Micro-ROS-Arduino.md
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/Micro-ROS-Arduino.md
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/Micro-ROS-Arduino.md
diff --git a/3rd_party/ros/Micro-ROS/ardiunotopython.ino b/3rd_party/ros/Micro-ROS_DEPRECATED/ardiunotopython.ino
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/ardiunotopython.ino
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/ardiunotopython.ino
diff --git a/3rd_party/ros/Micro-ROS/micro-ros_publisher.ino b/3rd_party/ros/Micro-ROS_DEPRECATED/micro-ros_publisher.ino
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/micro-ros_publisher.ino
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/micro-ros_publisher.ino
diff --git a/3rd_party/ros/Micro-ROS/ros_laser_scan.py b/3rd_party/ros/Micro-ROS_DEPRECATED/ros_laser_scan.py
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/ros_laser_scan.py
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/ros_laser_scan.py
diff --git a/3rd_party/ros/Micro-ROS/ros_teleop.py b/3rd_party/ros/Micro-ROS_DEPRECATED/ros_teleop.py
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/ros_teleop.py
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/ros_teleop.py
diff --git a/3rd_party/ros/Micro-ROS/setup.py b/3rd_party/ros/Micro-ROS_DEPRECATED/setup.py
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/setup.py
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/setup.py
diff --git a/3rd_party/ros/Micro-ROS_DEPRECATED/setup_micro_ros.sh b/3rd_party/ros/Micro-ROS_DEPRECATED/setup_micro_ros.sh
new file mode 100644
index 000000000..8aea8bcd4
--- /dev/null
+++ b/3rd_party/ros/Micro-ROS_DEPRECATED/setup_micro_ros.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+#Install micro-ros
+# cd ~
+# mkdir micro_ros_arduino
+# cd ~/micro_ros_arduino
+# source /opt/ros/$ROS_DISTRO/setup.bash
+# git clone -b foxy https://github.com/micro-ROS/micro_ros_setup.git src/micro_ros_setup
+# rosdep update && rosdep install --from-path src --ignore-src -y
+# cd ~/micro_ros_arduino || exit
+# colcon build
+# source install/local_setup.bash
+# ros2 run micro_ros_setup create_agent_ws.sh
+# ros2 run micro_ros_setup build_agent.sh
+# source install/local_setup.sh
+
+
+#Install Arduino CLI
+echo '**** Installing Arduino CLI ****'
+date
+cd ~
+pip3 install pyserial
+git clone https://github.com/arduino/arduino-cli.git
+cd arduino-cli/
+export PATH=$PATH:/root/$USER/arduino-cli/bin
+./install.sh
+export PATH=$PATH:/root/$USER/arduino-cli/bin
+arduino-cli config init
+arduino-cli core update-index
+arduino-cli core install arduino:samd
+arduino-cli core install arduino:sam
+arduino-cli core install arduino:avr
+# mkdir micro-ros_publisher
+# cd micro-ros_publisher
+# cp ~/micro-ros_publisher.ino ~/arduino-cli/micro-ros_publisher/
+# cd ~/.arduino15/packages/arduino/hardware/sam/1.6.12/
+# curl https://raw.githubusercontent.com/micro-ROS/micro_ros_arduino/foxy/extras/patching_boards/platform_arduinocore_sam.txt > platform.txt
\ No newline at end of file
diff --git a/3rd_party/ros/Micro-ROS/setup_ros_workspace.sh b/3rd_party/ros/Micro-ROS_DEPRECATED/setup_ros_workspace.sh
similarity index 96%
rename from 3rd_party/ros/Micro-ROS/setup_ros_workspace.sh
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/setup_ros_workspace.sh
index 3ea5da43b..fbc039a5b 100644
--- a/3rd_party/ros/Micro-ROS/setup_ros_workspace.sh
+++ b/3rd_party/ros/Micro-ROS_DEPRECATED/setup_ros_workspace.sh
@@ -13,4 +13,4 @@ sed '23i\ "ros_laser_scan = py_topic.ros_laser_scan:main",\n
cp ~/setup.py ~/ros2_ws/src/py_topic/
cd ~/ros2_ws/ || exit
colcon build
-source /opt/ros/foxy/setup.bash
\ No newline at end of file
+source /opt/ros/foxy/setup.bash
diff --git a/3rd_party/ros/Micro-ROS_DEPRECATED/sonar_setup.sh b/3rd_party/ros/Micro-ROS_DEPRECATED/sonar_setup.sh
new file mode 100755
index 000000000..15dd0854f
--- /dev/null
+++ b/3rd_party/ros/Micro-ROS_DEPRECATED/sonar_setup.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+source /opt/ros/foxy/setup.bash
+mypath=`pwd`
+cd ~/ros2_ws/
+cp $mypath/HC_SR04_Foxy.py ~/ros2_ws/src/py_topic/py_topic/
+cp $mypath/setup.py ~/ros2_ws/src/py_topic/
+colcon build
+source ~/ros2_ws/install/setup.bash
+echo "completed"
diff --git a/3rd_party/ros/Micro-ROS/start_sonar.sh b/3rd_party/ros/Micro-ROS_DEPRECATED/start_sonar.sh
similarity index 72%
rename from 3rd_party/ros/Micro-ROS/start_sonar.sh
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/start_sonar.sh
index 393e55f11..ca439810c 100755
--- a/3rd_party/ros/Micro-ROS/start_sonar.sh
+++ b/3rd_party/ros/Micro-ROS_DEPRECATED/start_sonar.sh
@@ -14,14 +14,14 @@ arduino-cli core install arduino:avr
arduino-cli lib search newping
arduino-cli lib install newping
cd ~
-cd Arduino/libraries/
-git clone -b foxy https://github.com/micro-ROS/micro_ros_arduino.git
-cd ~
+# cd Arduino/libraries/
+# git clone -b foxy https://github.com/micro-ROS/micro_ros_arduino.git
+# cd ~
cd arduino-cli/
export PATH=$PATH:/root/$USER/arduino-cli/bin
-arduino-cli board attach serial:///dev/ttyACM0 ardiunotopython
-arduino-cli compile --port /dev/ttyACM0 ardiunotopython
-arduino-cli upload --port /dev/ttyACM0 ardiunotopython
+arduino-cli board attach serial:///dev/ttyUSB0 ardiunotopython
+arduino-cli compile --port /dev/ttyUSB0 ardiunotopython
+arduino-cli upload --port /dev/ttyUSB0 ardiunotopython
#/bin/bash ~/linux_py2arduino.sh
source /opt/ros/foxy/setup.bash
cd ~/ros2_ws && source install/setup.bash && ros2 run py_topic sonar_sensor
diff --git a/3rd_party/ros/Micro-ROS/update.sh b/3rd_party/ros/Micro-ROS_DEPRECATED/update.sh
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/update.sh
rename to 3rd_party/ros/Micro-ROS_DEPRECATED/update.sh
diff --git a/3rd_party/ros/serial/Dockerfile b/3rd_party/ros/serial/Dockerfile
new file mode 100644
index 000000000..939a92610
--- /dev/null
+++ b/3rd_party/ros/serial/Dockerfile
@@ -0,0 +1,31 @@
+FROM ros:foxy
+SHELL ["/bin/bash", "-c"]
+
+# install tools
+RUN groupadd -r ubuntu && \
+ apt-get update && \
+ apt-get install -y --no-install-recommends python3-pip wget && \
+ apt-get install -y ros-foxy-composition
+
+WORKDIR /root/
+COPY ./ /root/
+
+RUN chmod u+x start_sonar.sh && \
+ cd ~ && \
+ mypath=`pwd` && \
+ pip3 install zmq && \
+ source /opt/ros/foxy/setup.bash && \
+ mkdir -p ~/ros2_ws/src && \
+ cd ~/ros2_ws/src || exit && \
+ ros2 pkg create --build-type ament_python py_topic && \
+ sudo cp /root/HC_SR04_Foxy.py /root/py2arduino.py /root/ros_laser_scan.py /root/ros_teleop.py ~/ros2_ws/src/py_topic/py_topic/ && \
+ sed '9i\ <buildtool_depend>ament_python</buildtool_depend>\n <exec_depend>rclpy</exec_depend>\n <exec_depend>geometry_msgs</exec_depend>' ~/ros2_ws/src/py_topic/package.xml > changed.txt && mv changed.txt ~/ros2_ws/src/py_topic/package.xml && \
+ sed '23i\ "ros_laser_scan = py_topic.ros_laser_scan:main",\n "ros_teleop = py_topic.ros_teleop:main"' ~/ros2_ws/src/py_topic/setup.py > changed.txt && mv changed.txt ~/ros2_ws/src/py_topic/setup.py && \
+ cp ~/setup.py ~/ros2_ws/src/py_topic/ && \
+ cd ~/ros2_ws/ || exit && \
+ colcon build && \
+ source /opt/ros/foxy/setup.bash && pip3 install pyserial
+
+
+
+ENTRYPOINT [ "./start_sonar.sh" ]
diff --git a/3rd_party/ros/serial/HC_SR04_Foxy.py b/3rd_party/ros/serial/HC_SR04_Foxy.py
new file mode 100644
index 000000000..79137df0e
--- /dev/null
+++ b/3rd_party/ros/serial/HC_SR04_Foxy.py
@@ -0,0 +1,78 @@
+import rclpy
+import serial
+import time
+import std_msgs
+import zmq
+
+from example_interfaces.msg import Int64
+from rclpy.node import Node
+from rclpy.qos import QoSProfile
+from rclpy.qos import qos_profile_sensor_data # this is required to have a full data
+
+ser = serial.Serial(
+ port="/dev/ttyACM0",
+ baudrate=9600,
+ parity=serial.PARITY_NONE,
+ stopbits=serial.STOPBITS_ONE,
+ bytesize=serial.EIGHTBITS
+) # connect to ardiuno port.
+# serialcomm.timeout = 1
+print("Found the ardiuno board.")
+print("Creating the /scan topic..")
+
+socket_address = "tcp://0.0.0.0:2000"
+context = zmq.Context()
+socket = context.socket(zmq.PUB)
+socket.bind(socket_address)
+
+class MinimalPublisher(Node):
+
+ def __init__(self):
+ super().__init__('minimal_publisher')
+ self.publisher_ = self.create_publisher(Int64, "scan", 10)
+ timer_period = 0 # seconds
+ self.timer = self.create_timer(timer_period, self.timer_callback)
+ self.i = 0
+
+ def timer_callback(self): # this is the part where we need to get it keep running
+ # check = ser.readline()
+ # if check == ' ': # this if statement is to skip string id. It doesn't seem like it works
+ # print("Skipped the ' '") # in #44 line, it kept recieving a string ' '
+ # else:
+ # sensorvalue = float(ser.readline()) # posts the value
+ # msg = Int64()
+ # msg.data= int(sensorvalue)
+ # print(msg)
+ # self.get_logger().info("distance: {}".format(sensorvalue))
+ # print(type(msg)) # this is to verify the type of the value. It should be float only
+ # self.publisher_.publish(msg) # this is to publish the data to topic 'scann'. It can change to 'scan' in #34 line
+ # self.i += 1
+ bytes = ser.readline()
+ data = bytes.decode(encoding="utf-8").strip("\r\n")
+ if data is not None and data != '':
+ if data[:4] == 'Ping':
+ data = data[5:]
+ data = data[:-2]
+ distance = int(float(data))
+ # self.get_logger().info(str(distance))
+ socket.send_pyobj(distance)
+
+
+def main(args=None):
+ rclpy.init(args=args)
+
+ minimal_publisher = MinimalPublisher()
+
+ rclpy.spin(minimal_publisher)
+
+ # Destroy the node explicitly
+ # (optional - otherwise it will be done automatically
+ # when the garbage collector destroys the node object)
+ minimal_publisher.destroy_node()
+ rclpy.shutdown()
+ serialcomm.close()
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/3rd_party/ros/serial/ardiunotopython.ino b/3rd_party/ros/serial/ardiunotopython.ino
new file mode 100644
index 000000000..4596481b2
--- /dev/null
+++ b/3rd_party/ros/serial/ardiunotopython.ino
@@ -0,0 +1,18 @@
+#include <NewPing.h> //This code is designed for HC-SR04 sensor specifically to get the better output. This is author of Tim Eckel - [email protected].
+//This code is to use the code and do the sample only.
+
+#define TRIGGER_PIN 9 // Arduino pin tied to trigger pin on the ultrasonic sensor.
+#define ECHO_PIN 11 // Arduino pin tied to echo pin on the ultrasonic sensor.
+#define MAX_DISTANCE 200 // Maximum distance we want to ping for (in centimeters). Maximum sensor distance is rated at 400-500cm.
+
+NewPing sonar(TRIGGER_PIN, ECHO_PIN, MAX_DISTANCE); // NewPing setup of pins and maximum distance.
+
+void setup() {
+ Serial.begin(9600); // Change 9600 so it can works well with the sensor
+}
+
+void loop() {
+ delay(50); // Wait 50ms between pings (about 20 pings/sec). 29ms should be the shortest delay between pings.
+ Serial.println(sonar.ping_cm()); // Send ping, get distance in cm and print result (0 = outside set distance range)
+ //Serial.println("cm");
+}
diff --git a/3rd_party/ros/serial/arduino_programmer.sh b/3rd_party/ros/serial/arduino_programmer.sh
new file mode 100644
index 000000000..c4cbc5113
--- /dev/null
+++ b/3rd_party/ros/serial/arduino_programmer.sh
@@ -0,0 +1,20 @@
+#Install Arduino CLI
+echo '**** Installing Arduino CLI ****'
+date
+cd ~
+pip3 install pyserial
+git clone https://github.com/arduino/arduino-cli.git
+cd arduino-cli/
+export PATH=$PATH:/root/$USER/arduino-cli/bin
+./install.sh
+export PATH=$PATH:/root/$USER/arduino-cli/bin
+arduino-cli config init
+arduino-cli core update-index
+#arduino-cli core install arduino:samd
+#arduino-cli core install arduino:sam
+arduino-cli core install arduino:avr
+#mkdir micro-ros_publisher
+#cd micro-ros_publisher
+#cp ~/micro-ros_publisher.ino ~/arduino-cli/micro-ros_publisher/
+#cd ~/.arduino15/packages/arduino/hardware/sam/1.6.12/
+curl https://raw.githubuse
\ No newline at end of file
diff --git a/3rd_party/ros/Micro-ROS/py2arduino.py b/3rd_party/ros/serial/py2arduino.py
similarity index 100%
rename from 3rd_party/ros/Micro-ROS/py2arduino.py
rename to 3rd_party/ros/serial/py2arduino.py
diff --git a/3rd_party/ros/Micro-ROS/micro_ros.py b/3rd_party/ros/serial/ros_laser_scan.py
similarity index 81%
rename from 3rd_party/ros/Micro-ROS/micro_ros.py
rename to 3rd_party/ros/serial/ros_laser_scan.py
index 4760e20c0..2e284340f 100644
--- a/3rd_party/ros/Micro-ROS/micro_ros.py
+++ b/3rd_party/ros/serial/ros_laser_scan.py
@@ -39,24 +39,18 @@
# limitations under the License.
"""
-
-import sensor_msgs.msg #this is needed to read lidar or any related to lidar.
import rclpy
import zmq
-import std_msgs
-from std_msgs.msg import Int32
-#from example_interfaces.msg import Int32
-from time import sleep
from rclpy.node import Node
-from sensor_msgs.msg import LaserScan #to call laserscan so it can convert the data or provide the data
+from sensor_msgs.msg import LaserScan
from rclpy.qos import QoSProfile
-from rclpy.qos import qos_profile_sensor_data #this is required to have a full data
+from rclpy.qos import qos_profile_sensor_data
print("Starting FEAGI-ROS Laser Scan Interface...")
# todo: export socket address to config file
-socket_address = 'tcp://127.0.0.1:2000'
+socket_address = 'tcp://0.0.0.0:2000'
context = zmq.Context()
socket = context.socket(zmq.PUB)
@@ -72,17 +66,20 @@ class MinimalSubscriber(Node):
def __init__(self):
super().__init__('minimal_subscriber')
self.subscription = self.create_subscription(
- Int32,
+ LaserScan,
'scan',
self.listener_callback,
qos_profile=qos_profile_sensor_data)
self.subscription # prevent unused variable warning
def listener_callback(self, msg):
- # self.get_logger().info("I heard: {}".format(msg)) #put .format(msg) to display the data
- self.get_logger().info("Distance: {}".format(msg)) #put .format(msg) to display the data
-
- socket.send_pyobj(msg)
+ try:
+ ranges = msg.ranges
+ socket.send_pyobj(ranges)
+ self.get_logger().info("angle_max: {}".format(msg.angle_max))
+ except AttributeError:
+ socket.send_pyobj(msg)
+ self.get_logger().info("angle_max: {}".format(msg.angle_max))
def main(args=None):
diff --git a/3rd_party/ros/serial/ros_teleop.py b/3rd_party/ros/serial/ros_teleop.py
new file mode 100644
index 000000000..6c36583ab
--- /dev/null
+++ b/3rd_party/ros/serial/ros_teleop.py
@@ -0,0 +1,254 @@
+#!/usr/bin/env python
+"""
+This module is a modified version of the teleop_keyboard from Darby Lim.
+
+Modifications are aimed to bypass the reading from keyboard and instead read them from a message quote.
+"""
+
+# Copyright (c) 2011, Willow Garage, Inc.
+# All rights reserved.
+#
+# Software License Agreement (BSD License 2.0)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of {copyright_holder} nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Darby Lim
+
+import os
+import select
+import sys
+import rclpy
+import zmq
+import time
+
+from geometry_msgs.msg import Twist
+from rclpy.qos import QoSProfile
+
+print("Starting FEAGI-ROS Teleop Interface...")
+
+if os.name == 'nt':
+ import msvcrt
+else:
+ import termios
+ import tty
+
+
+# todo: export socket address to config file
+socket_address = 'tcp://feagi:21000'
+print("Connecting to socket ", socket_address)
+
+# Setting up the message queue to receive navigation data from the teleop OPU.
+context = zmq.Context()
+socket = context.socket(zmq.SUB)
+socket.connect(socket_address)
+socket.set(zmq.SUBSCRIBE, ''.encode('utf-8'))
+listener = 0
+message = socket.recv_string()
+method_list = [method for method in dir(message) if method.startswith('_') is False]
+
+print("*******\n********\n", message, "*******\n********\n")
+
+BURGER_MAX_LIN_VEL = 0.22
+BURGER_MAX_ANG_VEL = 2.84
+
+WAFFLE_MAX_LIN_VEL = 0.26
+WAFFLE_MAX_ANG_VEL = 1.82
+
+LIN_VEL_STEP_SIZE = 0.01
+ANG_VEL_STEP_SIZE = 0.1
+
+TURTLEBOT3_MODEL = os.environ['TURTLEBOT3_MODEL']
+
+msg = """
+Customized for FEAGI
+"""
+
+e = """
+Communications Failed
+"""
+
+
+# def get_key(settings):
+# if os.name == 'nt':
+# return msvcrt.getch().decode('utf-8')
+# tty.setraw(sys.stdin.fileno())
+# rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
+# if rlist:
+# key = sys.stdin.read(1)
+# else:
+# key = ''
+#
+# termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
+# return key
+
+
+def print_vels(target_linear_velocity, target_angular_velocity):
+ print('currently:\tlinear velocity {0}\t angular velocity {1} '.format(
+ target_linear_velocity,
+ target_angular_velocity))
+
+
+def make_simple_profile(output, input, slop):
+ if input > output:
+ output = min(input, output + slop)
+ elif input < output:
+ output = max(input, output - slop)
+ else:
+ output = input
+
+ return output
+
+
+def constrain(input_vel, low_bound, high_bound):
+ if input_vel < low_bound:
+ input_vel = low_bound
+ elif input_vel > high_bound:
+ input_vel = high_bound
+ else:
+ input_vel = input_vel
+
+ return input_vel
+
+
+def check_linear_limit_velocity(velocity):
+ if TURTLEBOT3_MODEL == 'burger':
+ return constrain(velocity, -BURGER_MAX_LIN_VEL, BURGER_MAX_LIN_VEL)
+ else:
+ return constrain(velocity, -WAFFLE_MAX_LIN_VEL, WAFFLE_MAX_LIN_VEL)
+
+
+def check_angular_limit_velocity(velocity):
+ if TURTLEBOT3_MODEL == 'burger':
+ return constrain(velocity, -BURGER_MAX_ANG_VEL, BURGER_MAX_ANG_VEL)
+ else:
+ return constrain(velocity, -WAFFLE_MAX_ANG_VEL, WAFFLE_MAX_ANG_VEL)
+
+
+def main():
+ print('OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
+ settings = None
+ if os.name != 'nt':
+ settings = termios.tcgetattr(sys.stdin)
+
+ rclpy.init()
+
+ qos = QoSProfile(depth=10)
+ node = rclpy.create_node('teleop_keyboard')
+ pub = node.create_publisher(Twist, 'cmd_vel', qos)
+
+ status = 0
+ target_linear_velocity = 0.0
+ target_angular_velocity = 0.0
+ control_linear_velocity = 0.0
+ control_angular_velocity = 0.0
+
+ try:
+ print(msg)
+ while 1:
+ # key = get_key(settings)
+ key = socket.recv_string()
+ if key:
+ print(time.ctime(time.time()), 'Message received from FEAGI was: ', key)
+ # print('\n')
+ if key == 'w':
+ target_linear_velocity =\
+ check_linear_limit_velocity(target_linear_velocity + LIN_VEL_STEP_SIZE)
+ status = status + 1
+ print_vels(target_linear_velocity, target_angular_velocity)
+ elif key == 'x':
+ target_linear_velocity =\
+ check_linear_limit_velocity(target_linear_velocity - LIN_VEL_STEP_SIZE)
+ status = status + 1
+ print_vels(target_linear_velocity, target_angular_velocity)
+ elif key == 'a':
+ target_angular_velocity =\
+ check_angular_limit_velocity(target_angular_velocity + ANG_VEL_STEP_SIZE)
+ status = status + 1
+ print_vels(target_linear_velocity, target_angular_velocity)
+ elif key == 'd':
+ target_angular_velocity =\
+ check_angular_limit_velocity(target_angular_velocity - ANG_VEL_STEP_SIZE)
+ status = status + 1
+ print_vels(target_linear_velocity, target_angular_velocity)
+ elif key == ' ' or key == 's':
+ target_linear_velocity = 0.0
+ control_linear_velocity = 0.0
+ target_angular_velocity = 0.0
+ control_angular_velocity = 0.0
+ print_vels(target_linear_velocity, target_angular_velocity)
+ else:
+ if key == '\x03':
+ break
+
+ if status == 20:
+ print(msg)
+ status = 0
+
+ twist = Twist()
+
+ control_linear_velocity = make_simple_profile(
+ control_linear_velocity,
+ target_linear_velocity,
+ (LIN_VEL_STEP_SIZE / 2.0))
+
+ twist.linear.x = control_linear_velocity
+ twist.linear.y = 0.0
+ twist.linear.z = 0.0
+
+ control_angular_velocity = make_simple_profile(
+ control_angular_velocity,
+ target_angular_velocity,
+ (ANG_VEL_STEP_SIZE / 2.0))
+
+ twist.angular.x = 0.0
+ twist.angular.y = 0.0
+ twist.angular.z = control_angular_velocity
+
+ pub.publish(twist)
+
+ except Exception as e:
+ print(e)
+
+ finally:
+ twist = Twist()
+ twist.linear.x = 0.0
+ twist.linear.y = 0.0
+ twist.linear.z = 0.0
+
+ twist.angular.x = 0.0
+ twist.angular.y = 0.0
+ twist.angular.z = 0.0
+
+ pub.publish(twist)
+
+ if os.name != 'nt':
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/3rd_party/ros/serial/setup.py b/3rd_party/ros/serial/setup.py
new file mode 100644
index 000000000..0213c81c8
--- /dev/null
+++ b/3rd_party/ros/serial/setup.py
@@ -0,0 +1,30 @@
+from setuptools import setup
+
+package_name = 'py_topic'
+
+setup(
+ name=package_name,
+ version='0.0.0',
+ packages=[package_name],
+ data_files=[
+ ('share/ament_index/resource_index/packages',
+ ['resource/' + package_name]),
+ ('share/' + package_name, ['package.xml']),
+ ],
+ install_requires=['setuptools'],
+ zip_safe=True,
+ maintainer='FEAGI',
+ maintainer_email='',
+ description='TODO: Package description',
+ license='TODO: License declaration',
+ tests_require=['pytest'],
+ entry_points={
+ 'console_scripts': [
+ 'listener = py_topic.ros_laser_scan:main',
+ 'sonar_sensor = py_topic.HC_SR04_Foxy:main', #This is the one you use the sonar sensor.
+ 'py_laser_scan = py_topic.ros_laser_scan:main', #This is the original ros_laser_scan
+ 'micro_ros = py_topic.micro_ros:main',
+ 'py2arduino = py_topic.py2arduino:main'
+ ], #Once you add this in your setup.py in ros_ws/src/py_topic/ and update your workspace.
+ }, #Then you can run like ros2 run py_topic
+)
diff --git a/3rd_party/ros/Micro-ROS/sonar_setup.sh b/3rd_party/ros/serial/sonar_setup.sh
similarity index 74%
rename from 3rd_party/ros/Micro-ROS/sonar_setup.sh
rename to 3rd_party/ros/serial/sonar_setup.sh
index b30ca67ce..262c34260 100755
--- a/3rd_party/ros/Micro-ROS/sonar_setup.sh
+++ b/3rd_party/ros/serial/sonar_setup.sh
@@ -2,7 +2,7 @@
source /opt/ros/foxy/setup.bash
mypath=`pwd`
cd ~/ros2_ws/
-cp $mypath/ros_laser_scan.py $mypath/ros_teleop.py $mypath/HC_SR04_Foxy.py $mypath/micro_ros.py $mypath/py2arduino.py ~/ros2_ws/src/py_topic/py_topic/
+cp $mypath/ros_laser_scan.py $mypath/ros_teleop.py $mypath/HC_SR04_Foxy.py $mypath/py2arduino.py ~/ros2_ws/src/py_topic/py_topic/
cp $mypath/setup.py ~/ros2_ws/src/py_topic/
colcon build
source ~/ros2_ws/install/setup.bash
diff --git a/3rd_party/ros/serial/start_sonar.sh b/3rd_party/ros/serial/start_sonar.sh
new file mode 100755
index 000000000..d2cb2f690
--- /dev/null
+++ b/3rd_party/ros/serial/start_sonar.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+
+#./arduino_programmer.sh
+source /opt/ros/foxy/setup.bash
+cd ~/ros2_ws && source install/setup.bash && ros2 run py_topic sonar_sensor
\ No newline at end of file
diff --git a/3rd_party/ros/serial/start_sonar_plus.sh b/3rd_party/ros/serial/start_sonar_plus.sh
new file mode 100755
index 000000000..d515a1720
--- /dev/null
+++ b/3rd_party/ros/serial/start_sonar_plus.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+echo "First time on the board only."
+echo "This will erase everything in your board."
+echo "This will start in 5 seconds. "
+echo "To cancel this process, press ctrl C"
+sleep 5
+
+/bin/bash ./sonar_setup.sh
+export PATH=$PATH:/root/$USER/arduino-cli/bin
+mkdir ~/arduino-cli/ardiunotopython/
+cp ~/ardiunotopython.ino ~/arduino-cli/ardiunotopython/
+arduino-cli core install arduino:avr
+arduino-cli lib search newping
+arduino-cli lib install newping
+cd arduino-cli/
+export PATH=$PATH:/root/$USER/arduino-cli/bin
+arduino-cli board attach serial:///dev/ttyUSB0 ardiunotopython
+arduino-cli compile --port /dev/ttyUSB0 ardiunotopython
+arduino-cli upload --port /dev/ttyUSB0 ardiunotopython
+
+source /opt/ros/foxy/setup.bash
+cd ~/ros2_ws && source install/setup.bash && ros2 run py_topic sonar_sensor
diff --git a/docker/Dockerfile b/docker/Dockerfile
index ed67782ff..8b7306b3c 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -7,6 +7,8 @@ ARG REPO="https://github.com/feagi/feagi-core.git"
RUN mkdir -p /opt/source-code/feagi-core/
RUN git clone $REPO /opt/source-code/feagi-core
WORKDIR /opt/source-code/feagi-core/
+RUN git fetch
+RUN git checkout bugfix-container-connection
# MNIST
# RUN wget http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz -P /opt/source-code/feagi-core/raw/MNIST/ -q && \
diff --git a/docker/docker-compose-feagi-ros_serial-no_db.yml b/docker/docker-compose-feagi-ros_serial-no_db.yml
new file mode 100644
index 000000000..6a340926a
--- /dev/null
+++ b/docker/docker-compose-feagi-ros_serial-no_db.yml
@@ -0,0 +1,23 @@
+version: "3"
+
+networks:
+ net:
+ driver: bridge
+
+services:
+ ros:
+ build: ../3rd_party/ros/serial
+ # stdin_open: true
+ privileged: true
+ tty: true
+ networks:
+ - net
+ devices:
+ - "/dev/ttyACM0:/dev/ttyACM0"
+
+ feagi:
+ build: ./
+ environment:
+ - CONTAINERIZED=true
+ networks:
+ - net
\ No newline at end of file
diff --git a/src/feagi_configuration.ini b/src/feagi_configuration.ini
index ee27a341e..733e825e5 100644
--- a/src/feagi_configuration.ini
+++ b/src/feagi_configuration.ini
@@ -125,7 +125,7 @@ auto_test_comp_attempt_threshold = 3
ready_to_exit_burst =
logging_fire =
folder_backup =
-memory_formation = True
+memory_formation =
obsolete__plasticity =
capture_brain_activities =
visualize_latest_file =
diff --git a/src/ipu/processor/proximity.py b/src/ipu/processor/proximity.py
index 68f118e1a..3a93059ca 100644
--- a/src/ipu/processor/proximity.py
+++ b/src/ipu/processor/proximity.py
@@ -37,7 +37,7 @@ def lidar_to_coords(lidar_data, threshold=5):
return detection_locations
-def sonar_to_coords(sonar_data, threshold=None):
+def sonar_to_coords(sonar_data, threshold=5):
""" Converts SONAR data from sensor to coordinates in
the proximity cortical area.
@@ -47,7 +47,7 @@ def sonar_to_coords(sonar_data, threshold=None):
"""
# HC-SR04 datasheet specs (in cm)
SONAR_MIN = 2
- SONAR_MAX = 400
+ SONAR_MAX = 200
Z_MAX = runtime_data.genome['blueprint'] \
['proximity'] \
@@ -59,7 +59,7 @@ def sonar_to_coords(sonar_data, threshold=None):
x = 180
y = 90
z = dist_map
- return [(x, y, int(z))]
+ return [(x, y, int(z))]
def coords_to_neuron_ids(detection_locations, cortical_area):
@@ -71,13 +71,14 @@ def coords_to_neuron_ids(detection_locations, cortical_area):
:return: list of neuron IDs (str)
"""
neuron_ids = []
- for i in range(len(detection_locations)):
- block_ref = coords_to_block_ref(detection_locations[i], cortical_area)
- if block_ref in runtime_data.block_dic[cortical_area]:
- block_neurons = runtime_data.block_dic[cortical_area][block_ref]
- for neuron in block_neurons:
- if neuron is not None and neuron not in neuron_ids:
- neuron_ids.append(neuron)
+ if detection_locations is not None:
+ for i in range(len(detection_locations)):
+ block_ref = coords_to_block_ref(detection_locations[i], cortical_area)
+ if block_ref in runtime_data.block_dic[cortical_area]:
+ block_neurons = runtime_data.block_dic[cortical_area][block_ref]
+ for neuron in block_neurons:
+ if neuron is not None and neuron not in neuron_ids:
+ neuron_ids.append(neuron)
return neuron_ids
@@ -118,7 +119,7 @@ def map_value(val, min1, max1, min2, max2):
:param max2: max of range 2
:return: value mapped from range 1 to range 2
"""
- return (val-min1) * ((max2-min2) / (max1-min1)) + min2
+ return abs((val-min1) * ((max2-min2) / (max1-min1)) + min2)
def distance_3d(p1, p2):
diff --git a/src/ipu/source/lidar.py b/src/ipu/source/lidar.py
index 2d83f9612..69ac8c09a 100644
--- a/src/ipu/source/lidar.py
+++ b/src/ipu/source/lidar.py
@@ -48,7 +48,7 @@ def get_and_translate():
if hasattr(message, '__iter__'):
detections = proximity.lidar_to_coords(message)
else:
- detections = proximity.sonar_to_coords(int(message))
+ detections = proximity.sonar_to_coords(message)
neurons = proximity.coords_to_neuron_ids(
detections, cortical_area='proximity'
| stream data from Arduino sensor(s) into running container
- ensure that this is possible and that it can be achieved without issues stemming from lag/latency
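For context, a minimal sketch of the consuming side of that stream, as wired up by the compose file above (the ZMQ port and the `send_pyobj` payload are taken from the diff; the service name, connect address and loop are illustrative assumptions, not part of the patch):
```python
import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://ros:2000")             # assumed service name on the compose network
socket.setsockopt_string(zmq.SUBSCRIBE, "")  # subscribe to everything

while True:
    distance = socket.recv_pyobj()           # int centimetres, as published by HC_SR04_Foxy.py
    print("sonar distance:", distance)
```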
| 2021-07-14T20:41:13 | 0.0 | [] | [] |
|||
schelterlabs/jenga | schelterlabs__jenga-8 | 6bcc842e8c9ba3b442f9ec94c892562ea1387082 | diff --git a/jenga/basis.py b/jenga/basis.py
index a7d3016..10778ae 100644
--- a/jenga/basis.py
+++ b/jenga/basis.py
@@ -96,17 +96,18 @@ def sample_rows(self, data):
n_values_to_discard = int(len(data) * min(self.fraction, 1.0))
perc_lower_start = np.random.randint(0, len(data) - n_values_to_discard)
perc_idx = range(perc_lower_start, perc_lower_start + n_values_to_discard)
+
+ # Not At Random
+ if self.sampling.endswith('NAR'):
+ # pick a random percentile of values in this column
+ rows = data[self.column].sort_values().iloc[perc_idx].index
# At Random
- if self.sampling.endswith('AR'):
+ elif self.sampling.endswith('AR'):
depends_on_col = np.random.choice(list(set(data.columns) - {self.column}))
# pick a random percentile of values in other column
rows = data[depends_on_col].sort_values().iloc[perc_idx].index
- # Not At Random
- elif self.sampling.endswith('NAR'):
- # pick a random percentile of values in this column
- rows = data[self.column].sort_values().iloc[perc_idx].index
else:
ValueError('sampling type not recognized')
| Question: Does the distinction between AR and NAR really work?
Python's `if`/`elif` chains execute at most one branch: the first whose condition is true. See: https://docs.python.org/3/reference/compound_stmts.html#if
Maybe I am missing something, but I would argue that `NAR` corruption does not work at the moment, because the chain always matches `AR` first and never reaches the `NAR` check.
See here: https://github.com/schelterlabs/jenga/blob/master/jenga/basis.py#L101 and here: https://github.com/schelterlabs/jenga/blob/master/jenga/basis.py#L107
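The root cause is simply that every string ending in `'NAR'` also ends in `'AR'`:
```python
>>> 'NAR'.endswith('AR')
True
```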
Compare the output of the following snippet, which simulates the current code:
```python
for sampling in ['NAR', 'AR']:
if sampling.endswith('AR'):
print('AR')
elif sampling.endswith('NAR'):
print('NAR')
```
Output will be:
```
AR
AR
```
I suggest (and will send a PR in a second) swapping the two cases; compare:
```python
for sampling in ['NAR', 'AR']:
if sampling.endswith('NAR'):
print('NAR')
elif sampling.endswith('AR'):
print('AR')
```
Output will be:
```
NAR
AR
| 2020-12-07T07:37:02 | 0.0 | [] | [] |
|||
qcpydev/qcpy | qcpydev__qcpy-113 | 6c3a553ca7a8cf779515e16b41ef5de636386ae6 | diff --git a/src/visualize/bloch.py b/src/visualize/bloch.py
index 56be747..28c169b 100644
--- a/src/visualize/bloch.py
+++ b/src/visualize/bloch.py
@@ -1,20 +1,21 @@
import re
-
import matplotlib.pyplot as plt
import numpy as np
-
+from typing import Union
+from numpy._core.multiarray import ndarray
+from ..quantum_circuit import QuantumCircuit
from ..errors import BlochSphereOutOfRangeError, InvalidSavePathError
from ..tools import amplitude, probability
from .base import light_mode, sphere, theme
def bloch(
- quantumstate: any,
+ quantumstate: Union[ndarray, QuantumCircuit],
path: str = "BlochSphere.png",
save: bool = False,
show: bool = True,
light: bool = False,
-):
+) -> None:
"""Creates a qsphere visualization that can be interacted with.
Args:
quantum_state (ndarray/QuantumCircuit): State vector array or qcpy quantum circuit.
diff --git a/src/visualize/probability.py b/src/visualize/probability.py
index 1880e88..f80fdaf 100644
--- a/src/visualize/probability.py
+++ b/src/visualize/probability.py
@@ -1,20 +1,20 @@
import re
-
+from typing import Union
import matplotlib.pyplot as plt
-import numpy as np
-
+from numpy import ndarray, log2
+from ..quantum_circuit import QuantumCircuit
from ..errors import InvalidSavePathError
from ..tools import probability as prob
from .base import graph, light_mode, theme
def probability(
- state: any,
+ quantumstate: Union[ndarray, QuantumCircuit],
path: str = "probabilities.png",
save: bool = False,
show: bool = True,
light: bool = False,
-):
+) -> None:
"""Creates a probability representation of a given quantum circuit in matplotlib.
Args:
quantum_state (ndarray/QuantumCircuit): State vector array or qcpy quantum circuit.
@@ -27,8 +27,8 @@ def probability(
"""
if save and re.search(r"[<>:/\\|?*]", path) or len(path) > 255:
raise InvalidSavePathError("Invalid file name")
- probabilities = prob(state)
- num_qubits = int(np.log2(probabilities.size))
+ probabilities = prob(quantumstate)
+ num_qubits = int(log2(probabilities.size))
state_list = [format(i, "b").zfill(num_qubits) for i in range(2**num_qubits)]
percents = [i * 100 for i in probabilities]
plt.clf()
diff --git a/src/visualize/q_sphere.py b/src/visualize/q_sphere.py
index 6fc769a..cbaf816 100644
--- a/src/visualize/q_sphere.py
+++ b/src/visualize/q_sphere.py
@@ -1,5 +1,5 @@
import matplotlib.pyplot as plt
-from numpy import pi, log2, ndarray, cos, sin, linspace
+from numpy import pi, log2, ndarray, cos, sin, linspace, ndarray
import math
import re
from typing import Union
@@ -15,7 +15,7 @@
def q_sphere(
- quantum_state: Union[ndarray, QuantumCircuit],
+ quantumstate: Union[ndarray, QuantumCircuit],
path: str = "qsphere.png",
save: bool = False,
show: bool = True,
@@ -38,8 +38,8 @@ def q_sphere(
ax = sphere(theme.BACKGROUND_COLOR)
light_mode(light)
color_bar(plt, theme.TEXT_COLOR, theme.ACCENT_COLOR, colors, norm)
- prob_values = probability(quantum_state)
- phase_values = phaseangle(quantum_state)
+ prob_values = probability(quantumstate)
+ phase_values = phaseangle(quantumstate)
num_qubits = int(log2(len(prob_values)))
bucket_array = [0] * (num_qubits + 1)
phi_values = linspace(0, pi, num_qubits + 1)
diff --git a/src/visualize/state_vector.py b/src/visualize/state_vector.py
index dc6eec2..85295f0 100644
--- a/src/visualize/state_vector.py
+++ b/src/visualize/state_vector.py
@@ -1,19 +1,23 @@
import matplotlib.pyplot as plt
-import numpy as np
+from numpy import log2, ndarray
+from numpy import log2, ndarray, amax, pi
from matplotlib.colors import rgb2hex
-from ..errors import *
+from typing import Union
+import re
+from ..quantum_circuit import QuantumCircuit
+from ..errors import InvalidSavePathError
from .base.graph import graph
from ..tools import amplitude, phaseangle
from .base import color_bar, theme, light_mode
def state_vector(
- circuit: any,
+ quantumstate: Union[ndarray, QuantumCircuit],
path: str = "statevector.png",
save: bool = False,
show: bool = True,
light: bool = False,
-):
+) -> None:
"""Outputs a state vector representation from a given quantum circuit in matplotlib.
Args:
quantum_state (ndarray/QuantumCircuit): State vector array or qcpy quantum circuit.
@@ -24,16 +28,16 @@ def state_vector(
Returns:
None
"""
- if save and re.search(r"[<>:/\\|?*]", path) or len(filename) > 255:
+ if save and re.search(r"[<>:/\\|?*]", path) or len(path) > 255:
raise InvalidSavePathError("Invalid file name")
- amplitudes = amplitude(circuit)
- phase_angles = phaseangle(circuit)
- num_qubits = int(np.log2(amplitudes.size))
+ amplitudes = amplitude(quantumstate)
+ phase_angles = phaseangle(quantumstate)
+ num_qubits = int(log2(amplitudes.size))
state_list = [format(i, "b").zfill(num_qubits) for i in range(2**num_qubits)]
light_mode(light)
ax = graph(theme.TEXT_COLOR, theme.BACKGROUND_COLOR, num_qubits)
- ax.set_ylim(0, np.amax(amplitudes))
- norm = plt.Normalize(0, np.pi * 2)
+ ax.set_ylim(0, amax(amplitudes))
+ norm = plt.Normalize(0, pi * 2)
colors = plt.get_cmap("hsv")
color_bar(plt, theme.TEXT_COLOR, theme.ACCENT_COLOR, colors, norm)
hex_arr = [rgb2hex(i) for i in colors(norm(phase_angles))]
| Visualize part of the package needs docstrings
| 2024-11-19T06:38:02 | 0.0 | [] | [] |
|||
pwwang/datar | pwwang__datar-34 | 0b68a3105a52274b8e680714fe991ac2fb976297 | diff --git a/datar/__init__.py b/datar/__init__.py
index 0340b093..83316f42 100644
--- a/datar/__init__.py
+++ b/datar/__init__.py
@@ -4,4 +4,4 @@
from .core import _frame_format_patch
from .core.defaults import f
-__version__ = "0.3.1"
+__version__ = "0.3.2"
diff --git a/datar/base/string.py b/datar/base/string.py
index ea66a693..0b74d90b 100644
--- a/datar/base/string.py
+++ b/datar/base/string.py
@@ -7,7 +7,7 @@
from pipda import register_func
from ..core.contexts import Context
-from ..core.types import IntOrIter, StringOrIter, is_scalar, is_null
+from ..core.types import Dtype, IntOrIter, StringOrIter, is_scalar, is_null
from ..core.utils import (
arg_match,
get_option,
@@ -30,13 +30,14 @@
@register_func(None, context=Context.EVAL)
-def as_character(x: Any, _na: Any = NA) -> StringOrIter:
+def as_character(x: Any, str_dtype: Dtype = str, _na: Any = NA) -> StringOrIter:
"""Convert an object or elements of an iterable into string
Aliases `as_str` and `as_string`
Args:
x: The object
+ str_dtype: The string dtype to convert to
_na: How NAs should be casted. Specify NA will keep them unchanged.
But the dtype will be object then.
@@ -45,8 +46,7 @@ def as_character(x: Any, _na: Any = NA) -> StringOrIter:
When x is iterable, convert elements of it into strings
Otherwise, convert x to string.
"""
- return _as_type(x, str, na=_na)
-
+ return _as_type(x, str_dtype, na=_na)
as_str = as_string = as_character
diff --git a/datar/core/_frame_format_patch.py b/datar/core/_frame_format_patch.py
index b3cf4e96..84a1eb41 100644
--- a/datar/core/_frame_format_patch.py
+++ b/datar/core/_frame_format_patch.py
@@ -1,3 +1,36 @@
+# BSD 3-Clause License
+
+# Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc.
+# and PyData Development Team
+# All rights reserved.
+
+# Copyright (c) 2011-2021, Open source contributors.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
"""Monkey-patch data frame format to
1. add dtypes next to column names when printing
2. collapse data frames when they are elements of a parent data frame.
@@ -37,12 +70,7 @@
from .options import add_option
-# pylint: disable=c-extension-no-member
-# pylint: disable=invalid-name
-# pylint: disable=too-many-branches
-# pylint: disable=too-many-statements
-# pylint: disable=consider-using-enumerate
-# pylint: disable=too-many-nested-blocks
+# pylint: skip-file
# TODO: patch more formatters
diff --git a/datar/core/operator.py b/datar/core/operator.py
index 7f01916f..cdbbaeca 100644
--- a/datar/core/operator.py
+++ b/datar/core/operator.py
@@ -12,6 +12,15 @@
from .exceptions import DataUnrecyclable
from .types import BoolOrIter
+class DatarOperatorMeta(type):
+ """Allow attributes with '_op_' to pass for operator functions"""
+ def __getattr__(cls, name: str) -> Any:
+ """If name starts with '_op_', let it go self for the real function
+ Otherwise, do regular getattr.
+ """
+ if name.startswith('_op_'):
+ return True
+ return super().__getattr__(name)
@register_operator
class DatarOperator(Operator):
@@ -30,19 +39,19 @@ def _arithmetize2(self, left: Any, right: Any, op: str) -> Any:
left, right = _recycle_left_right(left, right)
return op_func(left, right)
- def invert(self, operand: Any) -> Any:
+ def _op_invert(self, operand: Any) -> Any:
"""Interpretation for ~x"""
- if isinstance(operand, (slice, str, list, tuple, Collection)):
+ if isinstance(operand, (slice, str, list, tuple)):
return Inverted(operand)
return self._arithmetize1(operand, "invert")
- def neg(self, operand: Any) -> Any:
+ def _op_neg(self, operand: Any) -> Any:
"""Interpretation for -x"""
if isinstance(operand, (slice, list)):
return Negated(operand)
return self._arithmetize1(operand, "neg")
- def and_(self, left: Any, right: Any) -> Any:
+ def _op_and_(self, left: Any, right: Any) -> Any:
"""Mimic the & operator in R.
This has to have Expression objects to be involved to work
@@ -63,7 +72,7 @@ def and_(self, left: Any, right: Any) -> Any:
right = Series(right).fillna(False)
return left & right
- def or_(self, left: Any, right: Any) -> Any:
+ def _op_or_(self, left: Any, right: Any) -> Any:
"""Mimic the & operator in R.
This has to have Expression objects to be involved to work
@@ -84,9 +93,9 @@ def or_(self, left: Any, right: Any) -> Any:
return left | right
# pylint: disable=invalid-name
- def ne(self, left: Any, right: Any) -> BoolOrIter:
+ def _op_ne(self, left: Any, right: Any) -> BoolOrIter:
"""Interpret for left != right"""
- out = self.eq(left, right)
+ out = self._op_eq(left, right)
if isinstance(out, (numpy.ndarray, Series)):
neout = ~out
# neout[pandas.isna(out)] = numpy.nan
@@ -96,11 +105,11 @@ def ne(self, left: Any, right: Any) -> BoolOrIter:
def __getattr__(self, name: str) -> Any:
"""Other operators"""
- if not hasattr(operator, name):
- raise AttributeError
- attr = partial(self._arithmetize2, op=name)
- attr.__qualname__ = self._arithmetize2.__qualname__
- return attr
+ if name.startswith('_op_'):
+ attr = partial(self._arithmetize2, op=name[4:])
+ attr.__qualname__ = self._arithmetize2.__qualname__
+ return attr
+ return super().__getattr__(name)
def _recycle_left_right(left: Any, right: Any) -> Tuple:
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 11238eec..0298b644 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 0.3.2
+- Adopt `pipda` v0.4.1 to fix `getattr()` failure for operater-connected expressions (#38)
+- Add `str_dtype` argument to `as_character()` to partially fix #36
+- Update license in `core._frame_format_patch` (#28)
+
## 0.3.1
- Adopt `pipda` v0.4.0
- Change argument `_dtypes` to `dtypes_` for tibble-families
diff --git a/docs/caveats/NAs.md b/docs/caveats/NAs.md
new file mode 100644
index 00000000..21bfb4d8
--- /dev/null
+++ b/docs/caveats/NAs.md
@@ -0,0 +1,22 @@
+
+- dtype
+
+ `NA` in datar sets to `numpy.nan`, which is a float. So that it causes problems for other dtypes of data, because setting a value to NA (float) in an array with other dtype is not compatible. Unlink R, python does not have missing value type for other dtypes.
+
+ pandas has introduced it's own `NA` and some `NA` compatible dtypes. However, `numpy` is still not aware of it, which causes problems for internal computations.
+
+- string
+
+ When initialize a string array intentionally: `numpy.array(['a', NA])`, the `NA` will be converted to a string `'nan'`. That may not be what we want sometimes. To avoid that, use `None` or `NULL` instead:
+
+ ```python
+ >>> numpy.array(['a', None])
+ array(['a', None], dtype=object)
+ ```
+
+ Just pay attention that the dtype falls back to object.
+
+
+- `NaN`
+
+ Since `NA` is already a float, `NaN` here is equivalent to `NA`.
diff --git a/docs/caveats/df_index_colname.md b/docs/caveats/df_index_colname.md
new file mode 100644
index 00000000..953d7a37
--- /dev/null
+++ b/docs/caveats/df_index_colname.md
@@ -0,0 +1,17 @@
+
+Most APIs from tidyverse packages ignore/reset the index (row names) of data frames, so do the APIs from `datar`. So when selecting rows, row indices are always used. With most APIs, the indices of the data frames are dropped, so they are actually ranging from 0 to `nrow(df) - 1`.
+
+!!! Note
+
+ when using 1-based indexing (default), 1 selects the first row. Even though the first row shows index 0 when it's printed.
+
+No `MultiIndex` indices/column names are supported for the APIs to select or manipulate data frames and the data frames generated by the APIs will not have `MultiIndex` indices/column names. However, since it's still pandas DataFrame, you can always do it in pandas way:
+
+```python
+df = tibble(x=1, y=2)
+df2 = df >> mutate(z=f.x+f.y)
+# pandas way to select
+df2.iloc[0, z] # 3
+# add multiindex to it:
+df.columns = pd.MultiIndex.from_product([df.columns, ['C']])
+```
\ No newline at end of file
diff --git a/docs/caveats/grouped.md b/docs/caveats/grouped.md
new file mode 100644
index 00000000..29109aa9
--- /dev/null
+++ b/docs/caveats/grouped.md
@@ -0,0 +1,9 @@
+
+`datar` doesn't use `pandas`' `DataFrameGroupBy`/`SeriesGroupBy` classes. Instead, we have our own `DataFrameGroupBy` class, which is actually a subclass of `DataFrame`, with 3 extra properties: `_group_data`, `_group_vars` and `_group_drop`, carring the grouping data, grouping variables/columns and whether drop the non-observable values. This is very similar to `grouped_df` from `dplyr`.
+
+The reasons that we implement this are:
+
+1. Pandas DataFrameGroupBy cannot handle mutilpe categorical columns as
+ groupby variables with non-obserable values
+2. It is very hard to retrieve group indices and data when doing apply
+3. NAs unmatched in grouping variables
diff --git a/docs/caveats/in.md b/docs/caveats/in.md
new file mode 100644
index 00000000..9961c8e7
--- /dev/null
+++ b/docs/caveats/in.md
@@ -0,0 +1,80 @@
+`%in%` in R is a shortcut for `is.element()` to test if the elements are in a container.
+
+```r
+r$> c(1,3,5) %in% 1:4
+[1] TRUE TRUE FALSE
+
+r$> is.element(c(1,3,5), 1:4)
+[1] TRUE TRUE FALSE
+```
+
+However, `in` in python acts differently:
+
+```python
+>>> import numpy as np
+>>>
+>>> arr = np.array([1,2,3,4])
+>>> elts = np.array([1,3,5])
+>>>
+>>> elts in arr
+/.../bin/bpython:1: DeprecationWarning: elementwise comparison failed; this will raise an error in the future.
+ #!/.../bin/python
+False
+>>> [1,2] in [1,2,3]
+False
+```
+
+It simply tests if the element on the left side of `in` is equal to any of the elements in the right side. Regardless of whether the element on the left side is scalar or not.
+
+Yes, we can redefine the behavior of this by writing your own `__contains__()` methods of the right object. For example:
+
+```python
+>>> class MyList(list):
+... def __contains__(self, key):
+... # Just an example to let it return the reversed result
+... return not super().__contains__(key)
+...
+>>> 1 in MyList([1,2,3])
+False
+>>> 4 in MyList([1,2,3])
+True
+```
+
+But the problem is that the result `__contains__()` is forced to be a scalar bool by python. In this sense, we cannot let `x in y` to be evaluated as a bool array or even a pipda `Expression` object.
+```python
+>>> class MyList(list):
+... def __contains__(self, key):
+... # Just an example
+... return [True, False, True] # logically True in python
+...
+>>> 1 in MyList([1,2,3])
+True
+>>> 4 in MyList([1,2,3])
+True
+```
+
+So instead, we ported `is.element()` from R:
+
+```python
+>>> import numpy as np
+>>> from datar.base import is_element
+>>>
+>>> arr = np.array([1,2,3,4])
+>>> elts = np.array([1,3,5])
+>>>
+>>> is_element(elts, arr)
+>>> is_element(elts, arr)
+array([ True, True, False])
+```
+
+So, as @rleyvasal pointed out in https://github.com/pwwang/datar/issues/31#issuecomment-877499212,
+
+if the left element is a pandas `Series`:
+```python
+>>> import pandas as pd
+>>> pd.Series(elts).isin(arr)
+0 True
+1 True
+2 False
+dtype: bool
+```
diff --git a/docs/indexing.md b/docs/caveats/indexing.md
similarity index 95%
rename from docs/indexing.md
rename to docs/caveats/indexing.md
index 8d6a0fca..f8865f8d 100644
--- a/docs/indexing.md
+++ b/docs/caveats/indexing.md
@@ -20,6 +20,8 @@ In `R`, negative indexes mean removal. However, here negative indexes are still
selection, as `-1` for the last column, `-2` for the second last, etc. It is
the same for both 0-based and 1-based indexing.
+If you want to do negative selection, use tilde `~` instead of `-`.
+
## Temporary index base change
For example:
diff --git a/docs/caveats/list.md b/docs/caveats/list.md
new file mode 100644
index 00000000..67538e97
--- /dev/null
+++ b/docs/caveats/list.md
@@ -0,0 +1,10 @@
+
+R's list is actually a name-value pair container. When there is a need for it, we use python's dict instead, since python's list doesn't support names.
+
+For example:
+```python
+>>> names({'a':1}, 'x')
+{'x': 1}
+```
+
+We have `base.c()` to mimic `c()` in R, which will concatenate and flatten anything passed into it. Unlike `list()` in python, it accepts multiple arguments. So that you can do `c(1,2,3)`, but you cannot do `list(1,2,3)` in python.
diff --git a/docs/caveats/nested_data_frames.md b/docs/caveats/nested_data_frames.md
new file mode 100644
index 00000000..ef3c9112
--- /dev/null
+++ b/docs/caveats/nested_data_frames.md
@@ -0,0 +1,18 @@
+
+pandas DataFrame doesn't support nested data frames. However, some R packages do, especially `tidyr`.
+
+Here we uses fake nested data frames:
+
+```python
+>>> df = tibble(x=1, y=tibble(a=2, b=3))
+>>> df
+ x y$a y$b
+ <int64> <int64> <int64>
+0 1 2 3
+```
+
+Now `df` is a fake nested data frame, with an inner data frame as column `y` in `df`.
+
+!!! Warning
+
+ For APIs from `tidyr` that tidies nested data frames, this is fully supported, but just pay attention when you operate it in pandas way. For other APIs, this feature is still experimental.
diff --git a/docs/caveats/ptypes.md b/docs/caveats/ptypes.md
new file mode 100644
index 00000000..bd67894d
--- /dev/null
+++ b/docs/caveats/ptypes.md
@@ -0,0 +1,2 @@
+
+Unlike some APIs from `tidyverse` packages that uses a data frame as `ptypes` tempate, here we use dtypes directly or a dict with name-dtype pairs for the columns.
diff --git a/docs/caveats/tibble_vs_dataframe.md b/docs/caveats/tibble_vs_dataframe.md
new file mode 100644
index 00000000..7c2e0ef4
--- /dev/null
+++ b/docs/caveats/tibble_vs_dataframe.md
@@ -0,0 +1,6 @@
+
+`datar` introduced `tibble` package as well.
+
+However, unlike in R, `tidyverse`'s `tibble` is a different class than the `data.frame` from base R, the data frame created by `datar.tibble.tibble()` and family is actually a pandas `DataFrame`. It's just a wrapper around the constructor.
+
+So you can do anything you do using pandas API after creation.
diff --git a/docs/piping_vs_regular.md b/docs/piping_vs_regular.md
deleted file mode 100644
index e6c7731e..00000000
--- a/docs/piping_vs_regular.md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-A verb can be called using a piping syntax:
-```python
-df >> verb(...)
-```
-
-Or in a regular way:
-```python
-verb(df, ...)
-```
-
-The piping is recommended and is designed specially to enable full features of `datar` with [`pipda`][1].
-
-The regular form of calling a verb has no problems with simple arguments (arguments that don't involve any functions registered by `register_func()/register_verb()`). Functions registered by `register_func(None, ...)` that don't have data argument as the first argument are also perfect to work in this form.
-
-However, there may be problems with verb calls as arguments of a verb, or a function call with data argument as arguments of a verb. In most cases, they are just fine, but there are ambiguous cases when the functions have optional arguments, and the second argument has the same type annotation as the first one. Because we cannot distinguish whether we should call it regularly or let it return a `Function` object to wait for the data to be piped in.
-
-For example:
-
-```python
-@register_verb(int)
-def add(a: int, b: int):
- return a + b
-
-@register_func(int)
-def incr(x: int, y: int = 3):
- return x + y
-
-add(1, incr(2))
-```
-
-In such a case, we don't know whether `incr(2)` should be interpreted as `incr(2, y=3)` or `add(y=3)` waiting for `x` to be piped in.
-
-The above code will still run and get a result of `6`, but a warning will be showing about the ambiguity.
-
-To avoid this, use the piping syntax: `1 >> add(incr(2))`, resulting in `4`. Or if you are intended to do `incr(2, y=3)`, specify a value for `y`: `add(1, incr(2, 3))`, resulting in `6`, without a warning.
-
-For more details, see also the [caveats][2] from `pipda`
-
-[1]: https://github.com/pwwang/pipda
-[2]: https://github.com/pwwang/pipda#caveats
diff --git a/docs/porting_rules.md b/docs/porting_rules.md
index 0c6282ae..96b44e77 100644
--- a/docs/porting_rules.md
+++ b/docs/porting_rules.md
@@ -23,94 +23,3 @@
## Extra arguments
In order to keep some python language features, or extend the APIs a little, a few APIs may come with extra arguments. For example, to allow people to work with 0-indexing, `base0_` argument is added to functions that involve indexing. `how_` for `drop_na` is added to allow drop rows of a data frame with `any` or `all` values of in that row.
-
-## `tibble` vs `DataFrame`
-
-`datar` introduced `tibble` package as well. However, unlike in R, `tidyverse`'s tibble is a different class than the `data.frame` from base R, the data frame created by `datar.tibble.tibble` is actually a pandas `DataFrame`. It's just a wrapper around the constructor.
-
-## Data frame indexes and column names
-
-Most APIs from tidyverse packages ignore/reset the index (row names) of data frames, so do the APIs from datar. So when selecting rows, row indices are always used. With most APIs, the indices of the data frames are dropped, so they are actually ranging from 0 to `nrow(df) - 1`.
-
-!!! Note
- when using 1-based indexing (default), 1 selects the first row. Even though the first row shows index 0 when it's printed.
-
-No `MultiIndex` indices/column names are supported for the APIs to select or manipulate data frames and the data frames generated by the APIs will not have `MultiIndex` indices/column names. However, since it's still pandas DataFrame, you can always do it in pandas way:
-
-```python
-df = tibble(x=1, y=2)
-df2 = df >> mutate(z=f.x+f.y)
-# pandas way to select
-df2.iloc[0, z] # 3
-# add multiindex to it:
-df.columns = pd.MultiIndex.from_product([df.columns, ['C']])
-```
-
-## Nested data frames
-
-pandas DataFrame doesn't support nested data frames. However, some R packages do, especially `tidyr`.
-
-Here we uses fake nested data frames:
-
-```python
->>> df = tibble(x=1, y=tibble(a=2, b=3))
->>> df
- x y$a y$b
- <int64> <int64> <int64>
-0 1 2 3
-```
-
-Now `df` is a fake nested data frame, with an inner data frame as column `y` in `df`.
-
-!!! Warning
-
- For APIs from `tidyr` that tidies nested data frames, this is fully supported, but just pay attention when you operate it in pandas way. For other APIs, this feature is still experimental.
-
-## `list` in `R` vs `list` in `python`
-
-R's list is actually a name-value pair container. When there is a need for it, we use python's dict instead, since python's list doesn't support names.
-
-For example:
-```python
->>> names({'a':1}, 'x')
-{'x': 1}
-```
-
-## `ptypes`
-
-Unlike some APIs from `tidyverse` packages that uses a data frame as `ptypes` tempate, here we use dtypes directly or a dict with name-dtype pairs for the columns.
-
-## Grouped/rowwise data frame
-
-`datar` doesn't use `pandas`' `DataFrameGroupBy`/`SeriesGroupBy` classes. Instead, we have our own `DataFrameGroupBy` class, which is actually a subclass of `DataFrame`, with 3 extra properties: `_group_data`, `_group_vars` and `_group_drop`, carring the grouping data, grouping variables/columns and whether drop the non-observable values. This is very similar to `grouped_df` from `dplyr`.
-
-The reasons that we implement this are:
-
-1. Pandas DataFrameGroupBy cannot handle mutilpe categorical columns as
- groupby variables with non-obserable values
-2. It is very hard to retrieve group indices and data when doing apply
-3. NAs unmatched in grouping variables
-
-## `NA` caveats
-
-- dtype
-
- `NA` in datar sets to `numpy.nan`, which is a float. So that it causes problems for other dtypes of data, because setting a value to NA (float) in an array with other dtype is not compatible. Unlink R, python does not have missing value type for other dtypes.
-
- pandas has introduced it's own `NA` and some `NA` compatible dtypes. However, `numpy` is still not aware of it, which causes problems for internal computations.
-
-- string
-
- When initialize a string array intentionally: `numpy.array(['a', NA])`, the `NA` will be converted to a string `'nan'`. That may not be what we want sometimes. To avoid that, use `None` or `NULL` instead:
-
- ```python
- >>> numpy.array(['a', None])
- array(['a', None], dtype=object)
- ```
-
- Just pay attention that the dtype falls back to object.
-
-
-- `NaN`
-
- Since `NA` is already a float, `NaN` here is equivalent to `NA`.
diff --git a/mkdocs.yml b/mkdocs.yml
index 2fc66b3d..4d4a4f05 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -45,8 +45,16 @@ nav:
- 'Porting rules': 'porting_rules.md'
- 'Import datar': 'import.md'
- 'The f-expression': 'f.md'
- - 'Piping vs regular calling': 'piping_vs_regular.md'
- - 'Indexing/Selection': 'indexing.md'
+ - 'Caveats':
+ 'Indexing/Selection': 'caveats/indexing.md'
+ 'Tibble vs. DataFrame': 'caveats/tibble_vs_dataframe.md'
+ 'Data frame indexes and column names': 'caveats/df_index_colname.md'
+ 'Nested data frames': 'caveats/nested_data_frames.md'
+ 'List in R vs in python': 'caveats/list.md'
+ 'Ptypes': 'caveats/ptypes.md'
+ 'Grouped/rowwise data frame': 'caveats/grouped.md'
+ 'NAs': 'caveats/NAs.md'
+ 'in vs %in%': 'caveats/in.md'
- 'Datasets': 'datasets.md'
- 'Advanced usage': 'advanced.md'
- 'Examples':
diff --git a/pyproject.toml b/pyproject.toml
index 8d63b1f0..4d0a8135 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "datar"
-version = "0.3.1"
+version = "0.3.2"
description = "Port of dplyr and other related R packages in python, using pipda."
authors = ["pwwang <[email protected]>"]
readme = "README.md"
diff --git a/setup.py b/setup.py
index 5942964a..398c9fe9 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
setup(
long_description=readme,
name='datar',
- version='0.3.1',
+ version='0.3.2',
description='Port of dplyr and other related R packages in python, using pipda.',
python_requires='==3.*,>=3.7.1',
project_urls={"homepage": "https://github.com/pwwang/datar",
| Show "str" type if data casted to `str` instead of `object`
Thanks for the quick response @pwwang .
Changing the data type works for me.
BTW, is there a way to display variable type `str` under the variable name? - after changing data type to `str` still shows `<object>` under the variable name `Description`
_Originally posted by @rleyvasal in https://github.com/pwwang/datar/issues/35#issuecomment-878615048_
Getting attributes not working for operator-connected expressions
```python
from datar.all import *
df = tibble(x=[1,-2,3], y=[-4,5,-6])
df >> mutate(z=(f.x + f.y).abs()) # AttributeError
```
Show "str" type if data casted to `str` instead of `object`
Thanks for the quick response @pwwang .
Changing the data type works for me.
BTW, is there a way to display variable type `str` under the variable name? - after changing data type to `str` still shows `<object>` under the variable name `Description`
_Originally posted by @rleyvasal in https://github.com/pwwang/datar/issues/35#issuecomment-878615048_
| 2021-07-10T02:01:36 | 0.0 | [] | [] |
|||
tedchou12/webull | tedchou12__webull-196 | 05d2deeb926dd9b61e216ef644a86652cde3b1bd | diff --git a/webull/webull.py b/webull/webull.py
index d429432..a61a01e 100644
--- a/webull/webull.py
+++ b/webull/webull.py
@@ -321,7 +321,7 @@ def get_current_orders(self) :
data = self.get_account()
return data['openOrders']
- def get_history_orders(self, status='Cancelled', count=20):
+ def get_history_orders(self, status='All', count=20):
'''
Historical orders, can be cancelled or filled
status = Cancelled / Filled / Working / Partially Filled / Pending / Failed / All
| function get_history_orders() retrieves only cancelled orders
Ted,
When utilizing the `webull.get_history_orders()` function, I was expecting to see cancelled and filled orders, but upon investigation noticed that the default is `status='Cancelled'`.
Appreciate all the work that you've put into this webull api interface. Very powerful and easy to use.
| 2021-02-28T07:54:16 | 0.0 | [] | [] |
|||
Noble-Lab/casanovo | Noble-Lab__casanovo-218 | 4d3e2f9fd02e76842a03d012bb8283a49b7751a6 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc6340e6..6076b631 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
## [Unreleased]
+### Fixed
+
+- Don't try to assign non-existing output writer during eval mode.
+
## [3.4.0] - 2023-06-19
### Added
diff --git a/casanovo/denovo/model_runner.py b/casanovo/denovo/model_runner.py
index fb5deeba..223c14c7 100644
--- a/casanovo/denovo/model_runner.py
+++ b/casanovo/denovo/model_runner.py
@@ -124,7 +124,7 @@ def _execute_existing(
if len(peak_filenames := _get_peak_filenames(peak_path, peak_ext)) == 0:
logger.error("Could not find peak files from %s", peak_path)
raise FileNotFoundError("Could not find peak files")
- else:
+ elif out_writer is not None:
out_writer.set_ms_run(peak_filenames)
peak_is_index = any(
[os.path.splitext(fn)[1] in (".h5", ".hdf5") for fn in peak_filenames]
| Casanovo Errors in Eval Mode
Running Casanovo in eval mode gives the following error:
> File "/usr/local/lib/python3.10/dist-packages/casanovo/denovo/model_runner.py", line 128, in _execute_existing out_writer.set_ms_run(peak_filenames)
>
> AttributeError: 'NoneType' object has no attribute 'set_ms_run'
You can reproduce using the simple one-spectrum example laid out in this notebook:
https://colab.research.google.com/github/Noble-Lab/casanovo_asms2023/blob/main/Casanovo_Tutorial.ipynb#scrollTo=b7yHuyE17Awf
| Is this a problem in the `dev` branch as well? We did a major refactor there and I suspect it may be solved.
I believe that it is not in the dev branch. But the issue was posted in response to my suggestion (on Slack) that we consider rescinding release 3.4.0 because it doesn't seem to work. While we work on getting the dev branch to be functional, I thought maybe we should either revert to 3.3 or push a 3.5 that runs successfully. | 2023-07-28T15:15:55 | 0.0 | [] | [] |
||
cortexm/pyswd | cortexm__pyswd-24 | 242a0650738997725c1f9ca4f13fec9abbc6d259 | diff --git a/swd/swd.py b/swd/swd.py
index 994220a..15683a8 100644
--- a/swd/swd.py
+++ b/swd/swd.py
@@ -177,7 +177,7 @@ def write_mem(self, address, data):
if len(chunk) > self._drv.maximum_8bit_data:
chunk_size32 = len(chunk) & 0xfffffffc
self._drv.write_mem32(address, chunk[:chunk_size32])
- del chunk[:chunk_size32]
+ chunk = chunk[chunk_size32:]
address += chunk_size32
self._drv.write_mem8(address, chunk)
return
| Error when loading large data
When trying to load a very large piece of data with `write_mem`
```
.local/lib/python3.7/site-packages/swd/swd.py", line 180, in write_mem
del chunk[:chunk_size32]
TypeError: 'bytes' object does not support item deletion
```
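A minimal standalone reproduction of the failure and of the re-slicing approach used in the fix; it needs no hardware or pyswd, only plain `bytes`.

```python
# bytes objects are immutable, so slice deletion raises the reported TypeError;
# re-binding to a slice works for bytes and bytearray alike.
chunk = bytes(range(10))

try:
    del chunk[:4]
except TypeError as exc:
    print(exc)            # 'bytes' object does not support item deletion

chunk = chunk[4:]         # the approach taken in the patch
print(chunk)              # b'\x04\x05\x06\x07\x08\t'
```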
| 2022-05-10T06:02:48 | 0.0 | [] | [] |
|||
RWTH-EBC/ebcpy | RWTH-EBC__ebcpy-75 | 57d9369b38c176a97f182a90d2176746b97e23fe | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 900abfb0..72a92329 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -62,3 +62,6 @@
- v0.3.2:
- Correct bounds for simulation Variables #56
- Add script to reproduce simulation studies and store info in an archive #27
+- v0.3.3:
+ - Fix memory error for large parameter variations durring multiproccesing #76
+ - Add option to set only one savepath for saving files of parameter varaiations #74
diff --git a/docs/source/conf.py b/docs/source/conf.py
index be7ae0e8..308214b3 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -71,7 +71,7 @@
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
-release = '0.3.2'
+release = '0.3.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/ebcpy/__init__.py b/ebcpy/__init__.py
index ee001230..e0a6656f 100644
--- a/ebcpy/__init__.py
+++ b/ebcpy/__init__.py
@@ -8,4 +8,4 @@
from .optimization import Optimizer
-__version__ = '0.3.2'
+__version__ = '0.3.3'
diff --git a/ebcpy/simulationapi/__init__.py b/ebcpy/simulationapi/__init__.py
index cc44bc62..1b9f6797 100644
--- a/ebcpy/simulationapi/__init__.py
+++ b/ebcpy/simulationapi/__init__.py
@@ -239,6 +239,8 @@ def simulate(self,
:param dict parameters:
Parameters to simulate.
Names of parameters are key, values are value of the dict.
+ It is also possible to specify a list of multiple parameter
+ dicts for different parameter variations to be simulated.
Default is an empty dict.
:param str return_option:
How to handle the simulation results. Options are:
@@ -251,10 +253,14 @@ def simulate(self,
Depending on the API, different kwargs may be used to specify file type etc.
:keyword str,os.path.normpath savepath:
If path is provided, the relevant simulation results will be saved
- in the given directory.
+ in the given directory. For multiple parameter variations also a list
+ of savepaths for each parameterset can be specified.
+ The savepaths for each parameter set must be unique.
Only relevant if return_option equals 'savepath' .
:keyword str result_file_name:
Name of the result file. Default is 'resultFile'.
+ For multiple parameter variations a list of names
+ for each result must be specified.
Only relevant if return_option equals 'savepath'.
:keyword (TimeSeriesData, pd.DataFrame) inputs:
Pandas.Dataframe of the input data for simulating the FMU with fmpy
@@ -284,6 +290,8 @@ def simulate(self,
# Handle special case for saving files:
if return_option == "savepath" and len(parameters) > 1:
savepath = kwargs.get("savepath", [])
+ if isinstance(savepath, (str, os.PathLike)):
+ savepath = [savepath] * len(parameters)
result_file_name = kwargs.get("result_file_name", [])
if (len(set(savepath)) != len(parameters) and
len(set(result_file_name)) != len(parameters)):
diff --git a/setup.py b/setup.py
index ce864dc6..d5b406f0 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
# Add all open-source packages to setup-requires
SETUP_REQUIRES = INSTALL_REQUIRES.copy()
-VERSION = "0.3.2"
+VERSION = "0.3.3"
setuptools.setup(
name='ebcpy',
| Saving simulation files of different parameter variations
When simulating several parameter variations at once, the simulation files cannot be saved in a single directory (keyword: savepath) with different result file names. In the simulation API it is checked whether the given savepath is a set whose length equals the number of parameter variations, which causes the bug.
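A hedged sketch of the call that works after the fix: `fmu_api` stands for an already configured ebcpy simulation API instance, the parameter name and paths are made up, and only the `simulate` keywords are taken from the patch.

```python
# One savepath string is now broadcast to every parameter variation; the
# result_file_name entries still have to be unique.
parameters = [{"speedRamp.duration": d} for d in (0.1, 0.4, 0.7)]   # made-up parameter

results = fmu_api.simulate(
    parameters=parameters,
    return_option="savepath",
    savepath=r"D:\sim_results",                                     # single directory
    result_file_name=[f"variation_{i}" for i in range(len(parameters))],
)
```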
| 2023-01-10T12:44:38 | 0.0 | [] | [] |
|||
RedHatQE/widgetastic.patternfly | RedHatQE__widgetastic.patternfly-127 | 7a2be499857b2edf79a72d9457cdd6e650bb1844 | diff --git a/src/widgetastic_patternfly/__init__.py b/src/widgetastic_patternfly/__init__.py
index de83d77..474cebc 100644
--- a/src/widgetastic_patternfly/__init__.py
+++ b/src/widgetastic_patternfly/__init__.py
@@ -1974,7 +1974,8 @@ def active(self):
@View.nested
class date_pick(HeaderView): # noqa
- DATES = ".//*[contains(@class, 'datepicker-days')]/table/tbody/tr/td"
+ ROOT = ".//*[contains(@class, 'datepicker-days')]"
+ DATES = ".//table/tbody/tr/td"
@property
def _elements(self):
@@ -1986,7 +1987,8 @@ def _elements(self):
@View.nested
class month_pick(HeaderView): # noqa
- MONTHS = ".//*[contains(@class, 'datepicker-months')]/table/tbody/tr/td/*"
+ ROOT = ".//*[contains(@class, 'datepicker-months')]"
+ MONTHS = ".//table/tbody/tr/td/*"
@property
def _elements(self):
@@ -1998,7 +2000,8 @@ def _elements(self):
@View.nested
class year_pick(HeaderView): # noqa
- YEARS = ".//*[contains(@class, 'datepicker-years')]/table/tbody/tr/td/*"
+ ROOT = ".//*[contains(@class, 'datepicker-years')]"
+ YEARS = ".//table/tbody/tr/td/*"
@property
def _elements(self):
| Datepicker.year_pick.select results in ValueError
```python
def test_bootstrap_date_picker(browser):
<...>
# `fill` and `read` with current date
today_date = datetime.now()
> view.dp_readonly.fill(today_date)
testing/test_date_picker.py:22:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.tox/py36/lib/python3.6/site-packages/widgetastic/log.py:115: in wrapped
result = f(self, *args, **kwargs)
.tox/py36/lib/python3.6/site-packages/widgetastic/widget/base.py:30: in wrapped
return method(self, Fillable.coerce(value), *args, **kwargs)
.tox/py36/lib/python3.6/site-packages/widgetastic_patternfly/__init__.py:2060: in fill
self.year_pick.select(value=value.year)
.tox/py36/lib/python3.6/site-packages/widgetastic/widget/base.py:67: in wrapped
return method(self, *new_args, **new_kwargs)
.tox/py36/lib/python3.6/site-packages/widgetastic_patternfly/__init__.py:2018: in select
start_yr, end_yr = [int(item) for item in self.datepicker_switch.read().split('-')]
```
`self.datepicker_switch.read()` is resolving the `datepicker-days` DOM and reading `November 2020` (for example) instead of the expected `2020-2029` from the `datepicker-years` DOM.
A `HeaderView` class is used to define the `datepicker_switch` locator, but the parent isn't set up properly for the widget, resulting in an out-of-scope match and unexpected text.
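The underlying problem is XPath scoping: a relative `.//` search that is not anchored to the years panel matches the days panel first, because it comes earlier in the DOM. A self-contained illustration with lxml (not widgetastic itself), on a stripped-down version of the datepicker markup:

```python
from lxml import etree

html = """<div class="datepicker">
  <div class="datepicker-days"><th class="switch">November 2020</th></div>
  <div class="datepicker-years"><th class="switch">2020-2029</th></div>
</div>"""
tree = etree.fromstring(html)

# Unscoped relative search: first match is the days panel header (the bug).
print(tree.xpath(".//th[@class='switch']/text()")[0])    # November 2020

# Anchoring to the years panel first, which is what the added ROOT does,
# makes the same relative locator resolve to the expected header.
years = tree.xpath(".//*[contains(@class, 'datepicker-years')]")[0]
print(years.xpath(".//th[@class='switch']/text()")[0])   # 2020-2029
```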
|||
shibing624/pycorrector | shibing624__pycorrector-471 | 9fe431b52c87157300eb60e282f1ecbf44826385 | diff --git a/pycorrector/confusion_corrector.py b/pycorrector/confusion_corrector.py
index 585e14ad..1867b858 100644
--- a/pycorrector/confusion_corrector.py
+++ b/pycorrector/confusion_corrector.py
@@ -5,6 +5,7 @@
功能:1)补充纠错对,提高召回率;2)对误杀加白,提高准确率
"""
import os
+import re
from typing import List
from loguru import logger
@@ -56,10 +57,10 @@ def correct(self, sentence: str):
details = []
# 自定义混淆集加入疑似错误词典
for err, truth in self.custom_confusion.items():
- idx = sentence.find(err)
- if idx > -1:
- corrected_sentence = sentence[:idx] + truth + sentence[(idx + len(err)):]
- details.append((err, truth, idx))
+ for i in re.finditer(err, sentence):
+ start,end = i.span()
+ corrected_sentence = corrected_sentence[:start] + truth + corrected_sentence[end:]
+ details.append((err, truth, start))
return {'source': sentence, 'target': corrected_sentence, 'errors': details}
def correct_batch(self, sentences: List[str]):
diff --git a/pycorrector/detector.py b/pycorrector/detector.py
index 8dca07c0..7513e308 100644
--- a/pycorrector/detector.py
+++ b/pycorrector/detector.py
@@ -4,6 +4,7 @@
@description: error word detector
"""
import os
+import re
from codecs import open
import numpy as np
@@ -396,9 +397,8 @@ def _detect(self, sentence, start_idx=0, **kwargs):
self.check_detector_initialized()
# 1. 自定义混淆集加入疑似错误词典
for confuse in self.custom_confusion:
- idx = sentence.find(confuse)
- if idx > -1:
- maybe_err = [confuse, idx + start_idx, idx + len(confuse) + start_idx, ErrorType.confusion]
+ for i in re.finditer(confuse, sentence):
+ maybe_err = [confuse, i.span()[0] + start_idx, i.span()[1] + start_idx, ErrorType.confusion]
self._add_maybe_error_item(maybe_err, maybe_errors)
# 2. 专名错误检测
| There are two bugs in the confusion-set related methods
### 1. kenlm
Problem: when the same wrong word appears more than once, only its first occurrence is corrected.
I found that if a word from the confusion set appears repeatedly in a sentence, only the first occurrence gets fixed.
For example, with the confusion set
```
自 我
祢 你
```
Example sentence
```
s = "自想说自由祢"
m_custom = Corrector(custom_confusion_path_or_dict = "./my_custom_confusion.txt")
m_custom.correct(s)
```
Result
```
{'source': '自想说自由祢', 'target': '我想说自由你', 'errors': [('自', '我', 0), ('祢', '你', 5)]}
```
The second "自" was not replaced.
### 2. confusion pipeline
When using the confusion pipeline on the same example as above, neither of the two "自" occurrences gets changed:
```
from pycorrector import ConfusionCorrector
confusion_dict = {"自": "我", "祢": "你"}
model_confusion = ConfusionCorrector(custom_confusion_path_or_dict=confusion_dict)
model_confusion.correct("自想说自由祢")
```
Result
```
{'source': '自想说自由祢',
 'target': '自想说自由你',
 'errors': [('自', '我', 0), ('祢', '你', 5)]}
```
The first '自' is detected, but neither of the two '自' occurrences is actually changed.
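A standalone sketch of the `re.finditer`-based replacement used in the fix, reduced from the patch; like the patch, it assumes each correction has the same length as the wrong word (so earlier replacements do not shift later match positions) and that the confusion keys contain no regex special characters.

```python
import re

confusion = {"自": "我", "祢": "你"}
sentence = "自想说自由祢"

corrected = sentence
details = []
for err, truth in confusion.items():
    for m in re.finditer(err, sentence):      # every occurrence, not just the first
        start, end = m.span()
        corrected = corrected[:start] + truth + corrected[end:]
        details.append((err, truth, start))

print(corrected)   # 我想说我由你
print(details)     # [('自', '我', 0), ('自', '我', 3), ('祢', '你', 5)]
```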
| I had these two issues fixed. I will submit a PR later. | 2024-01-30T09:21:49 | 0.0 | [] | [] |
||
deepmodeling/dpgen | deepmodeling__dpgen-971 | 3319a617807bfdb6c9d3a47987c35f277399ec22 | diff --git a/dpgen/generator/run.py b/dpgen/generator/run.py
index 7244e6af3..8e468fabb 100644
--- a/dpgen/generator/run.py
+++ b/dpgen/generator/run.py
@@ -2790,8 +2790,7 @@ def make_fp_gaussian(iter_index,
with open('input', 'w') as fp:
fp.write(ret)
os.chdir(cwd)
- # link pp files
- _link_fp_vasp_pp(iter_index, jdata)
+
def make_fp_cp2k (iter_index,
jdata):
| how to set fp for gaussian in param.json
Hi dper,
when I run a dpgen task, I want to calculate DFT with Gaussian; the setup of this part is as follows:
{...
"fp_style":"gaussian",
"shuffle_poscar":"false",
"fp_task_min":5,
"fp_task_max":20,
"use_clusters":false,
"fp_params":{
"keywords":"force b3lyp/genecp scf(Fermi,Vshift=300,Ndamp=30)" ,
"multiplicity":1,
"nproc":6,
"basis_set":"C H O 0\n6-31G*\n****\nSe Cd 0\nLANL2DZ\n****\nSe Cd 0\nLANL2DZ"
}
}
and the program will return an error:
Traceback (most recent call last):
File "/home/ljgroup1/.local/bin/dpgen", line 8, in <module>
sys.exit(main())
File "/home/ljgroup1/.local/lib/python3.9/site-packages/dpgen/main.py", line 185, in main
args.func(args)
File "/home/ljgroup1/.local/lib/python3.9/site-packages/dpgen/generator/run.py", line 3642, in gen_run
run_iter (args.PARAM, args.MACHINE)
File "/home/ljgroup1/.local/lib/python3.9/site-packages/dpgen/generator/run.py", line 3625, in run_iter
make_fp (ii, jdata, mdata)
File "/home/ljgroup1/.local/lib/python3.9/site-packages/dpgen/generator/run.py", line 2825, in make_fp
make_fp_gaussian(iter_index, jdata)
File "/home/ljgroup1/.local/lib/python3.9/site-packages/dpgen/generator/run.py", line 2659, in make_fp_gaussian
_link_fp_vasp_pp(iter_index, jdata)
File "/home/ljgroup1/.local/lib/python3.9/site-packages/dpgen/generator/run.py", line 2356, in _link_fp_vasp_pp
fp_pp_path = jdata['fp_pp_path']
KeyError: 'fp_pp_path'
This means I need to assign pseudopotential files for Gaussian, but I have already assigned my basis set and pseudopotential by writing them in string format under "basis_set" in fp_params, following the code of gaussian.py. The relevant part of gaussian.py is as follows:
if 'basis_set' in fp_params:
# custom basis set
buff.extend(['', fp_params['basis_set'], ''])
#print(type(fp_params['basis_set']))
for kw in itertools.islice(keywords, 1, None):
buff.extend(['\n--link1--', *chkkeywords, nprockeywords,
'#{}'.format(kw), '', titlekeywords, '', chargekeywords, ''])
return '\n'.join(buff)
When I check the code of the "make_fp_gaussian" part, I find that at the end of this function a function named "_link_fp_vasp_pp(iter_index, jdata)" is called. Does that mean I need to rewrite the pseudopotential and basis set used in Gaussian into a VASP-readable pseudopotential file for the Gaussian calculation? I do not know how to deal with this problem. I also find that "_link_fp_vasp_pp(iter_index, jdata)" is called by the other fp functions as well, from pwscf to cp2k.
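To make the shape of the fix explicit, a pseudocode-level sketch with the real dpgen functions stubbed out; this is only the control flow relevant to the report, not the actual implementation.

```python
# Only plane-wave style codes need fp_pp_path, so the Gaussian branch simply
# never calls the pseudopotential-linking helper.
def make_fp_gaussian(iter_index, jdata): ...      # writes Gaussian inputs only
def make_fp_vasp(iter_index, jdata): ...          # writes VASP inputs
def _link_fp_vasp_pp(iter_index, jdata):
    jdata["fp_pp_path"]                           # the lookup that raised KeyError

def make_fp(iter_index, jdata):
    if jdata["fp_style"] == "gaussian":
        make_fp_gaussian(iter_index, jdata)       # no pseudopotential linking
    elif jdata["fp_style"] == "vasp":
        make_fp_vasp(iter_index, jdata)
        _link_fp_vasp_pp(iter_index, jdata)

make_fp(0, {"fp_style": "gaussian", "fp_params": {}})   # no KeyError for gaussian
```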
| 2022-09-26T21:38:00 | 0.0 | [] | [] |
|||
VIDA-NYU/reprozip | VIDA-NYU__reprozip-392 | b4be884dc4eaccaffae45b1efbe10a6041244a81 | diff --git a/reprounzip/reprounzip/common.py b/reprounzip/reprounzip/common.py
index 38c75798b..a8d43ba42 100644
--- a/reprounzip/reprounzip/common.py
+++ b/reprounzip/reprounzip/common.py
@@ -49,6 +49,7 @@
FILE_WDIR = 0x04
FILE_STAT = 0x08
FILE_LINK = 0x10
+FILE_SOCKET = 0x20
class File(object):
@@ -736,7 +737,7 @@ def setup_logging(tag, verbosity):
file_level = logging.INFO
min_level = min(console_level, file_level)
- # Create formatter, with same format as C extension
+ # Create formatter
fmt = "[%s] %%(asctime)s %%(levelname)s: %%(message)s" % tag
formatter = LoggingDateFormatter(fmt)
diff --git a/reprozip/native/database.h b/reprozip/native/database.h
index 7fa7c602b..e2e8f434d 100644
--- a/reprozip/native/database.h
+++ b/reprozip/native/database.h
@@ -6,6 +6,7 @@
#define FILE_WDIR 0x04 /* File is used as a process's working dir */
#define FILE_STAT 0x08 /* File is stat()d (only metadata is read) */
#define FILE_LINK 0x10 /* The link itself is accessed, no dereference */
+#define FILE_SOCKET 0x20 /* The file is a UNIX domain socket */
int db_init(const char *filename);
int db_close(int rollback);
diff --git a/reprozip/native/syscalls.c b/reprozip/native/syscalls.c
index 88cbca5d6..d2f550f65 100644
--- a/reprozip/native/syscalls.c
+++ b/reprozip/native/syscalls.c
@@ -13,6 +13,7 @@
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
+#include <sys/un.h>
#include <unistd.h>
#include "config.h"
@@ -68,31 +69,6 @@ static char *abs_path_arg(const struct Process *process, size_t arg)
}
-static const char *print_sockaddr(void *address, socklen_t addrlen)
-{
- static char buffer[512];
- const short family = ((struct sockaddr*)address)->sa_family;
- if(family == AF_INET && addrlen >= sizeof(struct sockaddr_in))
- {
- struct sockaddr_in *address_ = address;
- snprintf(buffer, 512, "%s:%d",
- inet_ntoa(address_->sin_addr),
- ntohs(address_->sin_port));
- }
- else if(family == AF_INET6
- && addrlen >= sizeof(struct sockaddr_in6))
- {
- struct sockaddr_in6 *address_ = address;
- char buf[50];
- inet_ntop(AF_INET6, &address_->sin6_addr, buf, sizeof(buf));
- snprintf(buffer, 512, "[%s]:%d", buf, ntohs(address_->sin6_port));
- }
- else
- snprintf(buffer, 512, "<unknown destination, sa_family=%d>", family);
- return buffer;
-}
-
-
/* ********************
* Other syscalls that might be of interest but that we don't handle yet
*/
@@ -861,6 +837,46 @@ int syscall_fork_event(struct Process *process, unsigned int event)
* Network connections
*/
+static int handle_socket(struct Process *process, const char *msg,
+ void *address, socklen_t addrlen)
+{
+ const short family = ((struct sockaddr*)address)->sa_family;
+ if(family == AF_INET && addrlen >= sizeof(struct sockaddr_in))
+ {
+ struct sockaddr_in *address_ = address;
+ log_info(process->tid, "%s %s:%d", msg,
+ inet_ntoa(address_->sin_addr),
+ ntohs(address_->sin_port));
+ }
+ else if(family == AF_INET6
+ && addrlen >= sizeof(struct sockaddr_in6))
+ {
+ struct sockaddr_in6 *address_ = address;
+ char buf[50];
+ inet_ntop(AF_INET6, &address_->sin6_addr, buf, sizeof(buf));
+ log_info(process->tid, "%s [%s]:%d", msg,
+ buf, ntohs(address_->sin6_port));
+ }
+ else if(family == AF_UNIX)
+ {
+ struct sockaddr_un *address_ = address;
+ char buf[109];
+ strncpy(buf, &address_->sun_path, 108);
+ buf[108] = 0;
+ log_info(process->tid, "%s unix:%s", msg, buf);
+
+ if(db_add_file_open(process->identifier,
+ buf,
+ FILE_SOCKET | FILE_WRITE,
+ 0) != 0)
+ return -1; /* LCOV_EXCL_LINE */
+ }
+ else
+ log_info(process->tid, "%s <unknown destination, sa_family=%d>",
+ msg, family);
+ return 0;
+}
+
static int handle_accept(struct Process *process,
void *arg1, void *arg2)
{
@@ -870,8 +886,9 @@ static int handle_accept(struct Process *process,
{
void *address = malloc(addrlen);
tracee_read(process->tid, address, arg1, addrlen);
- log_info(process->tid, "process accepted a connection from %s",
- print_sockaddr(address, addrlen));
+ if(handle_socket(process, "process accepted a connection from",
+ address, addrlen) != 0)
+ return -1; /* LCOV_EXCL_LINE */
free(address);
}
return 0;
@@ -884,8 +901,9 @@ static int handle_connect(struct Process *process,
{
void *address = malloc(addrlen);
tracee_read(process->tid, address, arg1, addrlen);
- log_info(process->tid, "process connected to %s",
- print_sockaddr(address, addrlen));
+ if(handle_socket(process, "process connected to",
+ address, addrlen) != 0)
+ return -1; /* LCOV_EXCL_LINE */
free(address);
}
return 0;
diff --git a/reprozip/reprozip/common.py b/reprozip/reprozip/common.py
index 38c75798b..a8d43ba42 100644
--- a/reprozip/reprozip/common.py
+++ b/reprozip/reprozip/common.py
@@ -49,6 +49,7 @@
FILE_WDIR = 0x04
FILE_STAT = 0x08
FILE_LINK = 0x10
+FILE_SOCKET = 0x20
class File(object):
@@ -736,7 +737,7 @@ def setup_logging(tag, verbosity):
file_level = logging.INFO
min_level = min(console_level, file_level)
- # Create formatter, with same format as C extension
+ # Create formatter
fmt = "[%s] %%(asctime)s %%(levelname)s: %%(message)s" % tag
formatter = LoggingDateFormatter(fmt)
diff --git a/reprozip/reprozip/tracer/trace.py b/reprozip/reprozip/tracer/trace.py
index 8dde8ef28..37dfdfd59 100644
--- a/reprozip/reprozip/tracer/trace.py
+++ b/reprozip/reprozip/tracer/trace.py
@@ -11,6 +11,7 @@
from __future__ import division, print_function, unicode_literals
+import contextlib
import distro
from collections import defaultdict
from itertools import count
@@ -26,7 +27,7 @@
from reprozip import __version__ as reprozip_version
from reprozip import _pytracer
from reprozip.common import File, InputOutputFile, load_config, save_config, \
- FILE_READ, FILE_WRITE, FILE_LINK
+ FILE_READ, FILE_WRITE, FILE_LINK, FILE_SOCKET
from reprozip.tracer.linux_pkgs import magic_dirs, system_dirs, \
identify_packages
from reprozip.utils import PY3, izip, iteritems, itervalues, \
@@ -36,6 +37,21 @@
logger = logging.getLogger('reprozip')
+systemd_sockets = ('/run/systemd/private', '/run/dbus/system_bus_socket')
+
+
[email protected]
+def stderr_in_red():
+ if os.isatty(sys.stderr.fileno()):
+ try:
+ print('\x1b[31;20m', file=sys.stderr, end='', flush=True)
+ yield
+ finally:
+ print('\x1b[0m', file=sys.stderr, end='', flush=True)
+ else:
+ yield
+
+
class TracedFile(File):
"""Override of `~reprozip.common.File` that reads stats from filesystem.
@@ -145,6 +161,7 @@ def get_files(conn):
ORDER BY timestamp;
''')
executed = set()
+ systemd_accessed = False
run = 0
for event_type, r_name, r_mode, r_timestamp in rows:
if event_type == 'exec':
@@ -178,6 +195,9 @@ def get_files(conn):
f = files[r_name]
if r_mode & FILE_READ:
f.read(run)
+ if r_mode & FILE_SOCKET:
+ if r_name in systemd_sockets:
+ systemd_accessed = True
if r_mode & FILE_WRITE:
f.write(run)
# Mark the parent directory as read
@@ -230,21 +250,32 @@ def get_files(conn):
inputs = [[path for path in lst if path in files]
for lst in inputs]
- # Displays a warning for READ_THEN_WRITTEN files
+ # Display a warning for READ_THEN_WRITTEN files
read_then_written_files = [
fi
for fi in itervalues(files)
if fi.what == TracedFile.READ_THEN_WRITTEN and
not any(fi.path.lies_under(m) for m in magic_dirs)]
if read_then_written_files:
- logger.warning(
- "Some files were read and then written. We will only pack the "
- "final version of the file; reproducible experiments shouldn't "
- "change their input files")
+ with stderr_in_red():
+ logger.warning(
+ "Some files were read and then written. We will only pack the "
+ "final version of the file; reproducible experiments "
+ "shouldn't change their input files")
logger.info("Paths:\n%s",
", ".join(unicode_(fi.path)
for fi in read_then_written_files))
+ # Display a warning for systemd
+ if systemd_accessed:
+ with stderr_in_red():
+ logger.warning(
+ "A connection to systemd was detected. If systemd was asked "
+ "to start a process, it won't be captured by reprozip, "
+ "because it is an independent server. Please see "
+ "https://docs.reprozip.org/s/systemd.html for more "
+ "information")
+
files = set(
fi
for fi in itervalues(files)
| systemd doesn't work well
Using systemd is a bad idea, as starting services in a chrooted environment has a lot of issues. systemd itself uses a server, local sockets, and cgroups. I have no real plan to try and support that.
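For reference, the gist of the detection added in the patch, reduced to a standalone sketch; the socket paths are the ones hard-coded in `trace.py`, the warning text is paraphrased.

```python
SYSTEMD_SOCKETS = ('/run/systemd/private', '/run/dbus/system_bus_socket')

def warn_if_systemd(accessed_socket_paths):
    if any(path in SYSTEMD_SOCKETS for path in accessed_socket_paths):
        print("Warning: a connection to systemd was detected; anything systemd "
              "starts on the trace's behalf is not captured by reprozip.")

warn_if_systemd(['/run/systemd/private'])
```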
| We should try and detect this and show a warning, perhaps pointing to http://docs.reprozip.org/en/1.x/troubleshooting.html#systemd | 2023-03-03T03:41:19 | 0.0 | [] | [] |
||
Morisset/PyNeb_devel | Morisset__PyNeb_devel-31 | 26b62ee65290b308bed0b162846bb87e371e30b1 | diff --git a/pyneb/core/diags.py b/pyneb/core/diags.py
index 8222ad3..30c7a3b 100644
--- a/pyneb/core/diags.py
+++ b/pyneb/core/diags.py
@@ -10,8 +10,6 @@
from pyneb.utils.misc import int_to_roman, parseAtom, parseAtom2
from pyneb.utils.init import BLEND_LIST
from pyneb import config
-if config.INSTALLED['ai4neb']:
- from ai4neb import manage_RM
diags_dict = {}
@@ -778,6 +776,10 @@ def B(label, I=I, L=L):
else:
if type(value_den) == type([]): value_den = np.asarray(value_den)
if use_ANN:
+
+ if config.INSTALLED['ai4neb']:
+ from ai4neb import manage_RM
+
if not config.INSTALLED['ai4neb']:
self.log_.error('_getPopulations_ANN cannot be used in absence of ai4neb package',
calling=self.calling)
diff --git a/pyneb/core/icf.py b/pyneb/core/icf.py
index 9508380..226ec04 100644
--- a/pyneb/core/icf.py
+++ b/pyneb/core/icf.py
@@ -402,26 +402,25 @@ def _init_all_icfs(self):
# Wrong comment. Corrected 26 Dec 2014
# 'comment': 'Based on a grid of photoionization models. To be used if both O4 and N5 detected'},
'comment': 'Based on a grid of photoionization models. Valid if both S2 and S3 detected. He2 detected, He3 not.'},
-# Added 26 Dec 2014
# 26 Dec 2014 These icfs are temporarily commented out because they must be still be checked.
-# 'KB94_A38.6':{'elem': 'S',
-# 'atom': 'abun["S2"]',
-# 'icf': '((1 - (1 - abun["O2"]/elem_abun["KB94_A6"])**3)**(-1./3.)*(5.677 + (abun["O3"]/abun["O2"])**(0.433))',
-# 'type': 'PNe',
-# 'comment': 'Based on a grid of photoionization models and the S3/S2 ratio of a sample of PNe. Valid if S2 is detected but S3 is not detected'},
+# June 2023, after talking with L. Stanghellini, it appears there is a typo in KB94_A38, 4.677 + instead of 4.677 *
+ 'KB94_A38.6':{'elem': 'S',
+ 'atom': 'abun["S2"]',
+ 'icf': '((1 - (1 - abun["O2"]/elem_abun["KB94_A6"])**3)**(-1./3.)) * (4.677 * (abun["O3"]/abun["O2"])**(0.433))',
+ 'type': 'PNe',
+ 'comment': 'Based on a grid of photoionization models and the S3/S2 ratio of a sample of PNe. Valid if S2 is detected but S3 is not detected'},
# Added 26 Dec 2014
-# 'KB94_A38.8':{'elem': 'S',
-# 'atom': 'abun["S2"]',
-# 'icf': '((1 - (1 - abun["O2"]/elem_abun["KB94_A8"])**3)**(-1./3.)*(5.677 + (abun["O3"]/abun["O2"])**(0.433))',
-# 'type': 'PNe',
-# 'comment': 'BBased on a grid of photoionization models and the S3/S2 ratio of a sample of PNe. Valid if S2 is detected but S3 is not detected'},
+ 'KB94_A38.8':{'elem': 'S',
+ 'atom': 'abun["S2"]',
+ 'icf': '((1 - (1 - abun["O2"]/elem_abun["KB94_A8"])**3)**(-1./3.)) * (4.677 * (abun["O3"]/abun["O2"])**(0.433))',
+ 'type': 'PNe',
+ 'comment': 'BBased on a grid of photoionization models and the S3/S2 ratio of a sample of PNe. Valid if S2 is detected but S3 is not detected'},
# Added 26 Dec 2014
-# 'KB94_A38.10':{'elem': 'S',
-# 'atom': 'abun["S2"]',
-# 'icf': '((1 - (1 - abun["O2"]/elem_abun["KB94_A10"])**3)**(-1./3.)*(5.677 + (abun["O3"]/abun["O2"])**(0.433))',
-# 'type': 'PNe',
-# 'comment': 'Based on a grid of photoionization models and the S3/S2 ratio of a sample of PNe. Valid if S2 is detected but S3 is not detected'},
-# end commented block
+ 'KB94_A38.10':{'elem': 'S',
+ 'atom': 'abun["S2"]',
+ 'icf': '((1 - (1 - abun["O2"]/elem_abun["KB94_A10"])**3)**(-1./3.)) * (4.677 * (abun["O3"]/abun["O2"])**(0.433))',
+ 'type': 'PNe',
+ 'comment': 'Based on a grid of photoionization models and the S3/S2 ratio of a sample of PNe. Valid if S2 is detected but S3 is not detected'},
'KH01_4a': {'elem': 'He',
'atom': 'abun["He2"] + abun["He3"]',
'icf': '1',
diff --git a/pyneb/core/pynebcore.py b/pyneb/core/pynebcore.py
index 383c1d9..df846c2 100755
--- a/pyneb/core/pynebcore.py
+++ b/pyneb/core/pynebcore.py
@@ -2581,7 +2581,7 @@ def printIonic(self, tem=None, den=None, printA=False, printPop=True, printCrit=
for i in range(1, self.NLevels):
if printA:
for j in range(i):
- to_print = "{0:.3E} ".format(np.float(self.getA(i + 1, j + 1)))
+ to_print = "{0:.3E} ".format(np.float64(self.getA(i + 1, j + 1)))
print(to_print, end="")
print("")
for j in range(i):
@@ -3417,8 +3417,8 @@ def _loadTotRecombination(self):
f = open(self.TotRecFile)
data = f.readlines()
f.close()
- den_points = [np.float(d) for d in data[0].split()]
- tem_points = [np.float(d) for d in data[1].split()]
+ den_points = [np.float64(d) for d in data[0].split()]
+ tem_points = [np.float64(d) for d in data[1].split()]
self.alpha_grid = np.array([d.split() for d in data if d[0:3]!='***'][2::], dtype='float')
self.lg_den_grid, self.lg_tem_grid = np.meshgrid(np.log10(den_points), np.log10(tem_points))
@@ -4933,7 +4933,7 @@ def __normalize(self, label='H1_4861A'):
"""
if "=" in label:
line_label, factor = label.split('=')
- factor = np.float(factor)
+ factor = np.float64(factor)
else:
line_label = label
factor = 1.
diff --git a/pyneb/utils/Config.py b/pyneb/utils/Config.py
index 2f7dcdf..5407840 100644
--- a/pyneb/utils/Config.py
+++ b/pyneb/utils/Config.py
@@ -100,12 +100,8 @@ def __init__(self):
self.INSTALLED['cvxopt'] = True
except:
self.INSTALLED['cvxopt'] = False
- try:
- from ai4neb import manage_RM
- self.INSTALLED['ai4neb'] = True
- except:
- self.INSTALLED['ai4neb'] = False
-
+
+ self.INSTALLED['ai4neb'] = False
self.DataFiles = {}
self.unuse_multiprocs()
@@ -118,6 +114,13 @@ def __init__(self):
self.vactoair_low_wl = 2000. # UV in vacuum
self.vactoair_high_wl = 1e30 # no upper limit, IR in air!!!
+ def import_ai4neb(self):
+ try:
+ from ai4neb import manage_RM
+ self.INSTALLED['ai4neb'] = True
+ except:
+ self.INSTALLED['ai4neb'] = False
+
def set_noExtrapol(self, value):
self._noExtrapol = bool(value)
diff --git a/pyneb/utils/misc.py b/pyneb/utils/misc.py
index 3aecd9e..8defe5c 100644
--- a/pyneb/utils/misc.py
+++ b/pyneb/utils/misc.py
@@ -389,10 +389,10 @@ def get_reduced(N_rand, a, value_method = 'original', error_method='std'):
# error = (quants[1]-quants[0])/2)
elif error_method == 'upper':
mask = (a - value) >= 0
- error = ((((a[mask] - value)**2).sum())/np.float(mask.sum()))**0.5
+ error = ((((a[mask] - value)**2).sum())/np.float64(mask.sum()))**0.5
elif error_method == 'lower':
mask = (a - value) <= 0
- error = -((((a[mask] - value)**2).sum())/np.float(mask.sum()))**0.5
+ error = -((((a[mask] - value)**2).sum())/np.float64(mask.sum()))**0.5
elif error_method == 'std':
error = a.std()
else:
diff --git a/pyneb/version.py b/pyneb/version.py
index 8b857ab..fcebaee 100644
--- a/pyneb/version.py
+++ b/pyneb/version.py
@@ -1,2 +1,2 @@
# PyNeb version
-__version__ = '1.1.17'
+__version__ = '1.1.18'
| V1.18
* Fix the last np.float to np.float64 in pynebcore and misc
* Correct bug in ICF of S+, allow it again (it was set unavailable)
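For the first bullet, a minimal illustration of why the change was needed; the alias removal is a NumPy fact, the value is arbitrary.

```python
import numpy as np

value = np.float64("1.25e4")   # works on all NumPy versions
# value = np.float("1.25e4")   # AttributeError on NumPy >= 1.24, where the alias was removed
print(value)                   # 12500.0
```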
| 2023-08-08T15:58:17 | 0.0 | [] | [] |
|||
ml-explore/mlx-examples | ml-explore__mlx-examples-140 | 44b546d44652d1980c0a78c4ea6ac4eea2c1fad7 | diff --git a/llama/llama.py b/llama/llama.py
index af98685dd..ad6fd8ce0 100644
--- a/llama/llama.py
+++ b/llama/llama.py
@@ -294,7 +294,7 @@ def generate(question):
mx.eval(token)
prompt_processing = toc("Prompt processing", start)
- if len(tokens) >= args.num_tokens:
+ if len(tokens) >= args.max_tokens:
break
mx.eval(tokens)
| [llama] --few-shot broken
Running the few show example
```
python llama.py llama-2-7b tokenizer.model --max-tokens 1000 --few-shot --prompt sample_prompt.txt
```
is now broken:
```
Traceback (most recent call last):
File "/.../mlx-examples/llama/llama.py", line 388, in <module>
few_shot_generate(args)
File "/...//mlx-examples/llama/llama.py", line 324, in few_shot_generate
generate(prompt.replace("{}", question))
File "/...//mlx-examples/llama/llama.py", line 297, in generate
if len(tokens) >= args.num_tokens:
^^^^^^^^^^^^^^^
AttributeError: 'Namespace' object has no attribute 'num_tokens'. Did you mean: 'max_tokens'?
```
| Yup, that's a bug. Will fix! | 2023-12-18T18:09:48 | 0.0 | [] | [] |
||
Capsize-Games/airunner | Capsize-Games__airunner-357 | 7ff1e3564aa05aef90303aa69c9e986923d32677 | diff --git a/src/airunner/data/db.py b/src/airunner/data/db.py
index e210a868d..a790d344f 100644
--- a/src/airunner/data/db.py
+++ b/src/airunner/data/db.py
@@ -20,10 +20,10 @@
import configparser
session = get_session()
-
+do_stamp_alembic = False
# check if database is blank:
if not session.query(Prompt).first():
-
+ do_stamp_alembic = True
# Add Prompt objects
for prompt_option, data in prompt_bootstrap_data.items():
category = PromptCategory(name=prompt_option, negative_prompt=data["negative_prompt"])
@@ -295,14 +295,14 @@ def insert_variables(variables, prev_object=None):
},
}
- for generator_name, generator_sections in sections_bootstrap_data.items():
- for generator_section in generator_sections:
- settings.generator_settings.append(GeneratorSetting(
- section=generator_section,
- generator_name=generator_name,
- active_grid_border_color=active_grid_colors[generator_name]["border"][generator_section],
- active_grid_fill_color=active_grid_colors[generator_name]["fill"][generator_section]
- ))
+ generator_section = "txt2img"
+ generator_name = "stablediffusion"
+ session.add(GeneratorSetting(
+ section=generator_section,
+ generator_name=generator_name,
+ active_grid_border_color=active_grid_colors[generator_name]["border"][generator_section],
+ active_grid_fill_color=active_grid_colors[generator_name]["fill"][generator_section]
+ ))
session.add(Document(
name="Untitled",
@@ -509,4 +509,7 @@ def insert_variables(variables, prev_object=None):
with open(alembic_ini_path, 'w') as configfile:
config.write(configfile)
alembic_cfg = Config(alembic_ini_path)
-command.upgrade(alembic_cfg, "head")
+if not do_stamp_alembic:
+ command.upgrade(alembic_cfg, "head")
+else:
+ command.stamp(alembic_cfg, "head")
diff --git a/src/airunner/widgets/lora/lora_container_widget.py b/src/airunner/widgets/lora/lora_container_widget.py
index 3fa5ad6c4..b516499a8 100644
--- a/src/airunner/widgets/lora/lora_container_widget.py
+++ b/src/airunner/widgets/lora/lora_container_widget.py
@@ -33,8 +33,8 @@ def loras(self):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.load_lora()
self.scan_for_lora()
+ self.load_lora()
def load_lora(self):
session = get_session()
@@ -57,15 +57,14 @@ def add_lora(self, lora):
def scan_for_lora(self):
session = get_session()
lora_path = self.settings_manager.path_settings.lora_path
- with os.scandir(lora_path) as dir_object:
- for entry in dir_object:
- if entry.is_file(): # ckpt or safetensors file
- if entry.name.endswith(".ckpt") or entry.name.endswith(".safetensors") or entry.name.endswith(
- ".pt"):
- name = entry.name.replace(".ckpt", "").replace(".safetensors", "").replace(".pt", "")
- lora = Lora(name=name, path=entry.path, enabled=True, scale=100.0)
- session.add(lora)
- save_session(session)
+ for dirpath, dirnames, filenames in os.walk(lora_path):
+ for file in filenames:
+ if file.endswith(".ckpt") or file.endswith(".safetensors") or file.endswith(".pt"):
+ print("adding lora to session")
+ name = file.replace(".ckpt", "").replace(".safetensors", "").replace(".pt", "")
+ lora = Lora(name=name, path=os.path.join(dirpath, file), enabled=True, scale=100.0)
+ session.add(lora)
+ save_session(session)
def toggle_all_lora(self, checked):
for i in range(self.ui.lora_scroll_area.widget().layout().count()):
| Existing LORA not showing up on first load of database
| 2024-01-09T21:38:31 | 0.0 | [] | [] |
|||
jorgeajimenezl/aiodav | jorgeajimenezl__aiodav-15 | f4ed86e551c14764e8b31b837a85dc7d0bcd558e | diff --git a/aiodav/utils.py b/aiodav/utils.py
index 59ea371..ad9e87e 100644
--- a/aiodav/utils.py
+++ b/aiodav/utils.py
@@ -110,16 +110,18 @@ def get_info_from_response(response):
`modified`: date of resource modification,
`etag`: etag of resource
"""
- find_attributes = {
- "created": ".//{DAV:}creationdate",
- "name": ".//{DAV:}displayname",
- "size": ".//{DAV:}getcontentlength",
- "modified": ".//{DAV:}getlastmodified",
- "etag": ".//{DAV:}getetag",
- }
+ find_attributes = (
+ ("created", ".//{DAV:}creationdate"),
+ ("name", ".//{DAV:}displayname"),
+ ("name", ".//{DAV:}displayName"),
+ ("size", ".//{DAV:}getcontentlength"),
+ ("modified", ".//{DAV:}getlastmodified"),
+ ("etag", ".//{DAV:}getetag"),
+ )
info = dict()
- for (name, value) in find_attributes.items():
- info[name] = response.findtext(value)
+ for (name, value) in find_attributes:
+ if name not in info or not info[name]:
+ info[name] = response.findtext(value)
return info
@staticmethod
| Names are not retrieved for WebDav servers that use displayName in their response XML
Some WebDAV implementations seem to use `displayName` in their XML response instead of the more common `displayname` key.
```xml
<d:multistatus xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns" xmlns:x1="http://open-collaboration-services.org/ns">
<d:response>
<d:href>/dav.php/@Home/new_library/</d:href>
<d:propstat>
<d:prop>
<d:displayName>new_library</d:displayName>
<d:getetag>80a1745cfaa43b2057f781413209ff90</d:getetag>
<d:getlastmodified>Tue, 22 Aug 2023 08:10:49 GMT</d:getlastmodified>
<d:creationdate>2023-08-22T10:10:49+02:00</d:creationdate>
<oc:id>02320738</oc:id>
<d:resourcetype>
<d:collection/>
</d:resourcetype>
<d:iscollection>1</d:iscollection>
<d:isFolder>t</d:isFolder>
<oc:permissions>RGDNVCK</oc:permissions>
<x1:share-permissions>31</x1:share-permissions>
</d:prop>
<d:status>HTTP/1.1 200 OK</d:status>
</d:propstat>
</d:response>
</d:multistatus>
```
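A standalone sketch of the fallback lookup the patch introduces, using only the standard library; the XML below is a trimmed version of the response above.

```python
import xml.etree.ElementTree as ET

xml = '<d:prop xmlns:d="DAV:"><d:displayName>new_library</d:displayName></d:prop>'
root = ET.fromstring(xml)

# Try both spellings and keep the first non-empty hit, as the patched tuple does.
name = None
for xpath in (".//{DAV:}displayname", ".//{DAV:}displayName"):
    name = name or root.findtext(xpath)
print(name)   # new_library
```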
| 2023-08-22T14:29:29 | 0.0 | [] | [] |
|||
discsim/frank | discsim__frank-221 | 535c63a77c3595c2e3f7be9ad64182fad0737335 | diff --git a/frank/statistical_models.py b/frank/statistical_models.py
index 46db07b7..d286ba64 100644
--- a/frank/statistical_models.py
+++ b/frank/statistical_models.py
@@ -674,8 +674,8 @@ def __init__(self, DHT, M, j, p=None, scale=None, guess=None,
" your UVtable (incorrect units or weights), "
" or the deprojection being applied (incorrect"
" geometry and/or phase center). Else you may"
- " want to increase `rout` by 10-20% or `n` so"
- " that it is large, >~300.")
+ " want to adjust `rout` (ensure it is larger than"
+ " the source) or `n` (up to ~300).")
Ykm = self._DHT.coefficients()
Sj = np.einsum('ji,lj,jk->lik', Ykm, 1/p, Ykm)
| Warning about bad power spectrum
I was getting the following warning for certain choices of parameters: "Bad value in power spectrum. The power spectrum must be positive and not contain any NaN values. This is likely due to your UVtable (incorrect units or weights), or the deprojection being applied (incorrect geometry and/or phase center). Else you may want to increase `rout` by 10-20% or `n` so that it is large, >~300." In my case, I found that _decreasing_ rather than increasing r_out fixed things, so it may be helpful to users to make the warning more general.
| 2024-08-21T20:04:49 | 0.0 | [] | [] |
|||
pyenergyplus/witheppy | pyenergyplus__witheppy-46 | 26aa295d5c06cad7ed107dd7dd0f02f353161536 | diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..ec5ed2d
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,75 @@
+build: false
+
+for:
+- branches:
+ only:
+ - /.*/
+ environment:
+ matrix:
+ - PYTHON: "C:\\Python37"
+ PYTHONVERSION: 3.7
+ ARCHITECTURE: "i386"
+ MINICONDA: "C:\\Miniconda37"
+ ENERGYPLUS_VERSION: "9.0.1"
+ ENERGYPLUS_SHA: "bb7ca4f0da"
+ - PYTHON: "C:\\Python37-x64"
+ PYTHONVERSION: 3.7
+ ARCHITECTURE: "x86_64"
+ MINICONDA: "C:\\Miniconda37-x64"
+ ENERGYPLUS_VERSION: "9.0.1"
+ ENERGYPLUS_SHA: "bb7ca4f0da"
+
+-
+ branches:
+ only:
+ - master
+ environment:
+ matrix:
+ - PYTHON: "C:\\Python37"
+ PYTHONVERSION: 3.7
+ ARCHITECTURE: "i386"
+ MINICONDA: "C:\\Miniconda37"
+ ENERGYPLUS_VERSION: "9.0.1"
+ ENERGYPLUS_SHA: "bb7ca4f0da"
+ - PYTHON: "C:\\Python37-x64"
+ PYTHONVERSION: 3.7
+ ARCHITECTURE: "x86_64"
+ MINICONDA: "C:\\Miniconda37-x64"
+ ENERGYPLUS_VERSION: "9.0.1"
+ ENERGYPLUS_SHA: "bb7ca4f0da"
+
+install:
+ # set up Miniconda test environment
+ - "set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH%"
+ - conda config --set always_yes yes --set changeps1 no
+ - conda info -a
+ - conda update -q conda
+ - activate
+ - "conda create -n test-env python=%PYTHONVERSION% pytest lxml"
+ - "activate test-env"
+
+ - ECHO "Filesystem root:"
+ - ps: "ls \"C:/\""
+
+ - ECHO "Installed SDKs:"
+ - ps: "ls \"C:/Program Files/Microsoft SDKs/Windows\""
+ # install EnergyPlus
+ - ECHO "Installing EnergyPlus"
+ - ps: $env:ENERGYPLUS_INSTALL_VERSION = $env:ENERGYPLUS_VERSION -replace '\.','-'
+ - ps: $env:ENERGYPLUS_DOWNLOAD_BASE_URL = "https://github.com/NREL/EnergyPlus/releases/download/v$env:ENERGYPLUS_VERSION"
+ - ps: $env:ENERGYPLUS_DOWNLOAD_FILENAME = "EnergyPlus-$env:ENERGYPLUS_VERSION-$env:ENERGYPLUS_SHA-Windows-$env:ARCHITECTURE.exe"
+ - ps: $env:ENERGYPLUS_DOWNLOAD_URL = "$env:ENERGYPLUS_DOWNLOAD_BASE_URL/$env:ENERGYPLUS_DOWNLOAD_FILENAME"
+
+ - ps: "curl $env:ENERGYPLUS_DOWNLOAD_URL -OutFile $env:TMP\\$env:ENERGYPLUS_DOWNLOAD_FILENAME"
+ - ps: "& $env:TMP\\$env:ENERGYPLUS_DOWNLOAD_FILENAME /S | Out-Null"
+
+ - ps: echo $env:PYTHON
+
+ # set the integration test environment variable
+ - "SET EPPY_INTEGRATION=TRUE"
+ # install eppy
+ - "python setup.py install"
+
+test_script:
+ # Test command.
+ - "python -m pytest ./ -v"
diff --git a/requirements.txt b/requirements.txt
index ff8f838..4aaaf01 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
-eppy
+eppy==0.5.56
# pip install -e ../eppy
# -e ../eppy
# needed for readthedocs
-nbsphinx
+nbsphinx==0.8.7
diff --git a/requirements_dev.txt b/requirements_dev.txt
index 183c12c..96ed57e 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -1,15 +1,15 @@
-pip
+pip==21.3.1
bump2version==1.0.1
wheel==0.32.1
watchdog==0.9.0
flake8==3.5.0
tox==3.5.2
coverage==4.5.1
-Sphinx
+Sphinx == 4.3.1
twine==1.12.1
pytest==6.2.1
pytest-runner==5.2
# nbconvert==5.6.1 # for older versions of mac.
-nbsphinx
+nbsphinx==0.8.7
black
diff --git a/runningnotes.txt b/runningnotes.txt
index 58628fc..2bbbe50 100644
--- a/runningnotes.txt
+++ b/runningnotes.txt
@@ -1,3 +1,21 @@
+2021-12-07
+----------
+
+- tutorial for hvac, mergezones
+- hvac
+ - disconnectfan
+ - removefan
+ - fanzonemap
+- set up continious testing
+- instructions for
+ - mergin pull request
+ - use gitpython to generate the history file
+ - parsing the history.rst file to insert seems harder
+ - instructions for contributors
+ - github release instructions
+ - pypi release instructions
+
+
2021-12-06
----------
diff --git a/witheppy/eppyhelpers/hvac.py b/witheppy/eppyhelpers/hvac.py
index 23f1f28..303f2c3 100644
--- a/witheppy/eppyhelpers/hvac.py
+++ b/witheppy/eppyhelpers/hvac.py
@@ -210,6 +210,7 @@ def putexhaust(idf, zone, exhaust_fan):
def removeexhaust(idf, zone):
"""remove the exhaust fan from zone if the zone has one
+
Parameters
----------
idf: eppy.modeleditor.IDF
@@ -241,6 +242,12 @@ def removeexhaust(idf, zone):
return exhfan
else:
return None
+
+# - Possible new functions
+# - disconnectfan
+# - removefan
+# - fanzonemap
+
# TODO putexhaust has a frigle point related to IDD. See:
# next two lines can break if IDD changes (see in putexhaust())
| Document a process/protocol that developers can use while contributing
- There should be clear steps that developers can take while contributing
- See https://github.com/pyenergyplus/py90dot1/blob/master/CONTRIBUTING.md as a sample
| 2021-12-12T03:38:14 | 0.0 | [] | [] |
|||
conbench/conbench | conbench__conbench-1069 | a242f2a990416aa22789f62dfd092edd7eb64333 | diff --git a/Makefile b/Makefile
index 6e06e72ed..c59a4af85 100644
--- a/Makefile
+++ b/Makefile
@@ -73,7 +73,7 @@ lint-ci:
flake8
isort --check .
black --check --diff .
-# pylint --errors-only conbench
+ pylint --errors-only conbench
mypy conbench
mypy benchalerts
diff --git a/conbench/runner.py b/conbench/runner.py
index aadf01a28..ee24a5e70 100644
--- a/conbench/runner.py
+++ b/conbench/runner.py
@@ -159,6 +159,15 @@ def benchmark(self, f, name, publish=True, **kwargs):
try:
data, output = self._get_timing(f, iterations, timing_options)
+ # It's hard to read what this next function call really does. It
+ # does _not_ publish, but I think it creates a specific data
+ # structure. Should this be in the exception handler? Within
+ # self._get_timing() above we run user-given code, so that is
+ # expected to raise exceptions, and wants to be handled. But which
+ # exceptions is self.record() expected to raise especially when
+ # _not_ doing HTTP interaction? And why do we handle those
+ # exceptions in the same way as those exceptions that are raised by
+ # user-given code?
benchmark, _ = self.record(
{"data": data, "unit": "s"},
name,
@@ -171,7 +180,7 @@ def benchmark(self, f, name, publish=True, **kwargs):
cluster_info=cluster_info,
publish=False,
)
- except Exception as e:
+ except Exception as exc:
error = {"stack_trace": traceback.format_exc()}
benchmark, _ = self.record(
None,
@@ -186,11 +195,13 @@ def benchmark(self, f, name, publish=True, **kwargs):
error=error,
publish=False,
)
- raise e
+ raise exc
finally:
if publish:
- self.publish(benchmark)
-
+ # It's a bit unclear -- is `benchmark` defined in _all_ cases
+ # when we arrive here?
+ # https://pylint.readthedocs.io/en/latest/user_guide/messages/error/used-before-assignment.html
+ self.publish(benchmark) # pylint: disable=used-before-assignment
return benchmark, output
diff --git a/conbench/util.py b/conbench/util.py
index 833191d44..833ff4482 100644
--- a/conbench/util.py
+++ b/conbench/util.py
@@ -13,7 +13,12 @@
import yaml
from _pytest.pathlib import import_path
from requests.adapters import HTTPAdapter
-from requests.packages.urllib3.util.retry import Retry
+
+try:
+ from urllib3.util import Retry
+except ImportError:
+ # Legacy around times where requests had version 2.3 something.
+ from requests.packages.urllib3.util.retry import Retry # type: ignore[no-redef]
retry_strategy = Retry(
total=5,
@@ -205,7 +210,7 @@ def _post(self, url, data, expected):
self.session.mount("https://", adapter)
start = time.monotonic()
response = self.session.post(url, json=data)
- log.info("Time to POST", url, time.monotonic() - start)
+ log.info("Time to POST %s: %.5f s", url, time.monotonic() - start)
if response.status_code != expected:
self._unexpected_response("POST", response, url)
except requests.exceptions.ConnectionError:
| conbench CLI: logging error in runner record() method
In #621 we replaced `print()`s with logging. There was a mistake in this line:
https://github.com/conbench/conbench/blob/61fac4e5c12ee4b71a493929662e580084e4186b/conbench/util.py#L173
Whenever the `Conbench.record()` method is called, this results in the following LoggingError. (The code still continues past the line.)
<details>
<summary>Traceback</summary>
```
--- Logging error ---
Traceback (most recent call last):
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/logging/__init__.py", line 1100, in emit
msg = self.format(record)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/logging/__init__.py", line 943, in format
return fmt.format(record)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/logging/__init__.py", line 678, in format
record.message = record.getMessage()
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/logging/__init__.py", line 368, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Call stack:
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/bin/conbench", line 8, in <module>
sys.exit(conbench())
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/conbench/cli.py", line 149, in _benchmark
for result, output in benchmark().run(**kwargs):
File "my_benchmark.py", line 427, in run
yield self.conbench.record(**record_kwargs)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/conbench/runner.py", line 322, in record
self.publish(benchmark_result)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/conbench/util.py", line 154, in publish
self.post(self.config.benchmarks_url, benchmark)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/conbench/util.py", line 163, in post
self._post(self.config.login_url, self.config.credentials, 204)
File "/tmp/conda-01875206-650b-44b0-b980-841363c0ac72/lib/python3.10/site-packages/conbench/util.py", line 173, in _post
log.info("Time to POST", url, time.monotonic() - start)
Message: 'Time to POST'
Arguments: ('<url>/api/login/', 0.402172212023288)
```
</details>
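For readers unfamiliar with the logging module's lazy %-formatting, a small self-contained reproduction of the failing call shape next to the corrected one; the URL and timing are made up.

```python
import logging
import time

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")

start = time.monotonic()
url = "https://example.test/api/login/"                               # placeholder URL
log.info("Time to POST %s: %.5f s", url, time.monotonic() - start)    # correct: args fill %s / %.5f
log.info("Time to POST", url, time.monotonic() - start)               # reproduces the logging error above
```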
| :) what's not tested is broken. Yes. :)
This was probably the intention:
```
log.info("Time to POST to %s: %.4f s", ...
```
Adding `devprod` label because of the challenge in the test-coverage-maintenance space. We maintain this, but it's not properly covered by tests. Improving that state would mean to do less/no maintenance, or more tests.
One question is: why did linting tooling not catch this? | 2023-04-06T12:19:31 | 0.0 | [] | []
||
sizemore0125/skeletonkey | sizemore0125__skeletonkey-19 | cec49fa12721e0ea12f7a9b27429357ced374d28 | diff --git a/README.md b/README.md
index 991875e..d325ad8 100644
--- a/README.md
+++ b/README.md
@@ -63,7 +63,7 @@ For instance, if your configuration file has a nested YAML, you can overwrite th
python project.py --model.parameters.layer_size 256
```
-The resulting Namespace object will contain nested Namespace objects that can be accessed using dot notation, such as args.model.parameters.layer_size.
+The resulting Config object will contain nested Config objects that can be accessed using dot notation, such as args.model.parameters.layer_size.
#### Defining Flags in Configuration
@@ -139,7 +139,7 @@ model:
activation: relu
```
-3. When you run your project, `skeletonkey` will merge the default configuration files with the main configuration file, making the values from the default configuration files available in the `args` Namespace object:
+3. When you run your project, `skeletonkey` will merge the default configuration files with the main configuration file, making the values from the default configuration files available in the `args` Config object:
```python
print("Learning rate: ", args.learning_rate)
diff --git a/skeletonkey/__init__.py b/skeletonkey/__init__.py
index f9d690c..984a0d9 100644
--- a/skeletonkey/__init__.py
+++ b/skeletonkey/__init__.py
@@ -10,7 +10,7 @@
__version__ = "0.0.11"
-from .core import unlock, instantiate, instantiate_all
+from .core import unlock, instantiate, instantiate_all, Config
# Names to import with wildcard import
-__all__ = ["unlock", "instantiate", "instantiate_all"]
\ No newline at end of file
+__all__ = ["unlock", "instantiate", "instantiate_all", "Config"]
\ No newline at end of file
diff --git a/skeletonkey/config.py b/skeletonkey/config.py
index 6021d58..a42a72f 100644
--- a/skeletonkey/config.py
+++ b/skeletonkey/config.py
@@ -13,6 +13,82 @@
import yaml
+BASE_DEFAULT_KEYWORD: str = "defaults"
+BASE_COLLECTION_KEYWORD: str = "keyring"
+
+class Config():
+ def __init__(self, *args, **kwargs):
+ """
+ Initializes the config from a dictionary or from kwargs.\n
+
+ Args:
+ Either a single dictionary as an arg or suply a number of kwargs.
+ """
+
+ if (len(args) != 0) and (len(kwargs) != 0):
+ raise ValueError("Config should not receive args and kwargs at the same time.")
+
+ elif not (len(args) == 0 or len(args) == 1):
+ raise ValueError("Config should not receive more than one non-keyword argument.")
+
+
+ if len(args) == 1:
+ if not isinstance(args[0], dict):
+ raise ValueError("Supplied arg must be a dictionary")
+ self._init_from_dict(args[0])
+ else:
+ self._init_from_dict(kwargs)
+
+
+ def _init_from_dict(self, dictionary: dict):
+ """
+ Initialize the config from a dictionary
+
+ Args:
+ dictionary (dict): The dictionary to be converted.
+ """
+ for key, value in dictionary.items():
+ if isinstance(value, dict):
+ value = Config(value)
+
+ self[key] = value
+
+ def __getitem__(self, key:str):
+ return self.__getattribute__(key)
+
+ def __setitem__(self, key: str, value):
+ self.__setattr__(key, value)
+
+
+ def __delitem__(self, key: str):
+ self.__delattr__()
+
+ def __str__(self):
+ return self._subconfig_str(self, 0)[1:]
+
+ def __repr__(self):
+ return f"Config({self._subconfig_str(self, 1)})"
+
+ def _subconfig_str(self, subspace: "Config", tab_depth:int):
+ """
+ Convert a given subconfig to a string with the given tab-depth
+
+ args:
+ subspace: A Config object
+ tab_depth: an integer representing the current tab depth
+ """
+ s = ""
+ for k, v in subspace.__dict__.items():
+ s += "\n" + " "*tab_depth + k + ": "
+
+ if isinstance(v, Config):
+ s+= "\n"
+ s+= self._subconfig_str(v, tab_depth+1)[1:] # [1:] gets rid of uneccesary leading \n
+ else:
+ s += str(v)
+
+ return s
+
def find_yaml_path(file_path: str) -> str:
"""
@@ -171,7 +247,7 @@ def get_default_args_from_path(config_path: str, default_yaml: str) -> dict:
def load_yaml_config(
- config_path: str, config_name: str, default_keyword: str = "defaults", collection_keyword: str = "keyring"
+ config_path: str, config_name: str, default_keyword: str = BASE_DEFAULT_KEYWORD, collection_keyword: str = BASE_COLLECTION_KEYWORD
) -> dict:
"""
Load a YAML configuration file and update it with default configurations.
@@ -249,7 +325,7 @@ def unpack_collection(config, config_path, collection_keyword):
def add_args_from_dict(
- arg_parser: argparse.ArgumentParser, config: dict, prefix=""
+ arg_parser: argparse.ArgumentParser, config_dict: dict, prefix=""
) -> None:
"""
Add arguments to an ArgumentParser instance using key-value pairs from a
@@ -262,7 +338,7 @@ def add_args_from_dict(
the arguments and their default values.
prefix (str, optional): The prefix string for nested keys. Defaults to ''.
"""
- for key, value in config.items():
+ for key, value in config_dict.items():
if isinstance(value, dict):
add_args_from_dict(arg_parser, value, f"{prefix}{key}.")
else:
@@ -282,34 +358,18 @@ def add_args_from_dict(
)
-def dict_to_namespace(dictionary: dict) -> argparse.Namespace:
- """
- Convert a dictionary to an argparse.Namespace object recursively.
-
- Args:
- dictionary (dict): The dictionary to be converted.
-
- Returns:
- argparse.Namespace: A Namespace object representing the input dictionary.
- """
- for key, value in dictionary.items():
- if isinstance(value, dict):
- dictionary[key] = dict_to_namespace(value)
- return argparse.Namespace(**dictionary)
-
-
-def namespace_to_nested_namespace(namespace: argparse.Namespace) -> argparse.Namespace:
+def config_to_nested_config(config: Config) -> Config:
"""
- Convert an argparse.Namespace object with 'key1.keyn' formatted keys into a nested Namespace object.
+ Convert an Config object with 'key1.keyn' formatted keys into a nested Config object.
Args:
- namespace (argparse.Namespace): The Namespace object to be converted.
+ config (Config): The Config object to be converted.
Returns:
- argparse.Namespace: A nested Namespace representation of the input Namespace object.
+ Config: A nested Config representation of the input Config object.
"""
nested_dict = {}
- for key, value in vars(namespace).items():
+ for key, value in vars(config).items():
keys = key.split(".")
current_dict = nested_dict
for sub_key in keys[:-1]:
@@ -318,4 +378,4 @@ def namespace_to_nested_namespace(namespace: argparse.Namespace) -> argparse.Nam
current_dict = current_dict[sub_key]
current_dict[keys[-1]] = value
- return dict_to_namespace(nested_dict)
+ return Config(nested_dict)
diff --git a/skeletonkey/core.py b/skeletonkey/core.py
index f56220e..8aed867 100644
--- a/skeletonkey/core.py
+++ b/skeletonkey/core.py
@@ -9,9 +9,11 @@
load_yaml_config,
add_args_from_dict,
add_yaml_extension,
- namespace_to_nested_namespace,
+ config_to_nested_config,
+ Config
)
+TARGET_KEYWORD: str = "_target_"
def get_config_dir_path(config_path: str) -> str:
"""
@@ -73,7 +75,7 @@ def _inner_function():
parser = argparse.ArgumentParser()
add_args_from_dict(parser, config)
args = parser.parse_args()
- args = namespace_to_nested_namespace(args)
+ args = config_to_nested_config(args)
return main(args)
return _inner_function
@@ -101,14 +103,14 @@ def import_class(class_string: str) -> Type[Any]:
return class_obj
-def instantiate(namespace: argparse.Namespace, **kwargs) -> Any:
+def instantiate(config: Config, target_keyword=TARGET_KEYWORD, **kwargs) -> Any:
"""
- Instantiate a class object using a Namespace object.
- The Namespace object should contain the key "_target_" to
+ Instantiate a class object using a Config object.
+ The Config object should contain the key "_target_" to
specify the class to instantiate.
Args:
- namespace (argparse.Namespace): A Namespace object containing the key "_target_"
+ config (Config): A Config object containing the key "_target_"
to specify the class, along with any additional keyword
arguments for the class.
@@ -118,8 +120,7 @@ def instantiate(namespace: argparse.Namespace, **kwargs) -> Any:
Raises:
TypeError: If the class is missing specific parameters.
"""
- obj_kwargs = vars(namespace).copy()
- target_keyword = "_target_"
+ obj_kwargs = vars(config).copy()
class_obj = import_class(obj_kwargs[target_keyword])
del obj_kwargs[target_keyword]
@@ -141,14 +142,14 @@ def instantiate(namespace: argparse.Namespace, **kwargs) -> Any:
return class_obj(**obj_kwargs)
-def instantiate_all(namespace: argparse.Namespace, **kwargs) -> Tuple[Any]:
+def instantiate_all(config: Config, target_keyword=TARGET_KEYWORD, **kwargs) -> Tuple[Any]:
"""
- Instantiate a tuple of class objects using a Namespace object.
- The Namespace object should contain other Namespace objects where the key
+ Instantiate a tuple of class objects using a Config object.
+ The Config object should contain other Config objects where the key
"_target_" is at the top level, which specifies the class to instantiate.
Args:
- namespace (argparse.Namespace): A Namespace object containing the key "_target_"
+ config (Config): A Config object containing the key "_target_"
to specify the class , along with any additional keyword arguments for the class.
Returns:
@@ -157,14 +158,14 @@ def instantiate_all(namespace: argparse.Namespace, **kwargs) -> Tuple[Any]:
Raises:
ValueError: If any subconfig does not have "_target_" key.
"""
- collection_dict = vars(namespace).copy()
+ collection_dict = vars(config).copy()
objects = []
for obj_key in collection_dict.keys():
obj_namespace = collection_dict[obj_key]
- if not hasattr(obj_namespace, "_target_"):
+ if not hasattr(obj_namespace, target_keyword):
raise ValueError(f"subconfig ({obj_key}) in collection does not have '_target_' key at the top level.")
obj = instantiate(obj_namespace, **kwargs)
| Subconfig override
Merging in changes for command line overrides of arguments in subconfigs.
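For context, a small hedged sketch of how the `Config` object introduced in this diff behaves once nested dictionaries are converted; the keys and values below are made up purely for illustration, and the import assumes the `Config` re-export added to `__init__.py` above.

```python
from skeletonkey import Config

# Nested dicts become nested Config objects, so subconfig values can be read
# either as attributes or with item access, and overridden in place.
cfg = Config({"model": {"lr": 0.01, "layers": 4}, "seed": 42})

print(cfg.model.lr)            # 0.01
print(cfg["model"]["layers"])  # 4
cfg.model.lr = 0.1             # an override of a subconfig argument
print(cfg)                     # pretty-printed with one indent level per nesting depth
```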
| 2023-10-24T04:46:59 | 0.0 | [] | [] |
|||
facebookresearch/CompilerGym | facebookresearch__CompilerGym-510 | 87e4f7ec1709a8d94acab2c5944d0efc70622555 | diff --git a/compiler_gym/envs/llvm/datasets/cbench.py b/compiler_gym/envs/llvm/datasets/cbench.py
index f344f085c..992e67add 100644
--- a/compiler_gym/envs/llvm/datasets/cbench.py
+++ b/compiler_gym/envs/llvm/datasets/cbench.py
@@ -288,7 +288,7 @@ def _make_cBench_validator(
def validator_cb(env: "LlvmEnv") -> Optional[ValidationError]: # noqa: F821
"""The validation callback."""
with _CBENCH_DOWNLOAD_THREAD_LOCK:
- with fasteners.InterProcessLock(cache_path("cbench-v1-runtime-data.LOCK")):
+ with fasteners.InterProcessLock(cache_path(".cbench-v1-runtime-data.LOCK")):
download_cBench_runtime_data()
cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data")
@@ -557,7 +557,7 @@ def __init__(self, site_data_base: Path):
def install(self):
super().install()
with _CBENCH_DOWNLOAD_THREAD_LOCK:
- with fasteners.InterProcessLock(cache_path("cbench-v1-runtime-data.LOCK")):
+ with fasteners.InterProcessLock(cache_path(".cbench-v1-runtime-data.LOCK")):
download_cBench_runtime_data()
diff --git a/compiler_gym/envs/llvm/service/Benchmark.cc b/compiler_gym/envs/llvm/service/Benchmark.cc
index 813dd8270..dcc034a18 100644
--- a/compiler_gym/envs/llvm/service/Benchmark.cc
+++ b/compiler_gym/envs/llvm/service/Benchmark.cc
@@ -67,6 +67,24 @@ RealizedBenchmarkDynamicConfig realizeDynamicConfig(const BenchmarkDynamicConfig
return RealizedBenchmarkDynamicConfig(cfg);
}
+/**
+ * Create a temporary directory to use as a scratch pad for on-disk storage.
+ * This directory is guaranteed to exist.
+ *
+ * Errors in this function are fatal.
+ *
+ * @return fs::path A path.
+ */
+fs::path createScratchDirectoryOrDie() {
+ const fs::path cacheRoot = util::getCacheRootPath();
+ const fs::path dir = fs::unique_path(cacheRoot / "benchmark-scratch-%%%%-%%%%");
+
+ sys::error_code ec;
+ fs::create_directories(dir, ec);
+ CHECK(!ec) << "Failed to create scratch directory: " << dir;
+ return dir;
+}
+
} // anonymous namespace
Status readBitcodeFile(const fs::path& path, Bitcode* bitcode) {
@@ -135,7 +153,7 @@ Benchmark::Benchmark(const std::string& name, const Bitcode& bitcode,
const BaselineCosts& baselineCosts)
: context_(std::make_unique<llvm::LLVMContext>()),
module_(makeModuleOrDie(*context_, bitcode, name)),
- scratchDirectory_(fs::path(fs::unique_path(workingDirectory / "scratch-%%%%-%%%%"))),
+ scratchDirectory_(createScratchDirectoryOrDie()),
dynamicConfigProto_(dynamicConfig),
dynamicConfig_(realizeDynamicConfig(dynamicConfig, scratchDirectory_)),
baselineCosts_(baselineCosts),
@@ -143,11 +161,7 @@ Benchmark::Benchmark(const std::string& name, const Bitcode& bitcode,
needsRecompile_(true),
runtimesPerObservationCount_(kDefaultRuntimesPerObservationCount),
warmupRunsPerRuntimeObservationCount_(kDefaultWarmupRunsPerRuntimeObservationCount),
- buildtimesPerObservationCount_(kDefaultBuildtimesPerObservationCount) {
- sys::error_code ec;
- fs::create_directory(scratchDirectory(), ec);
- CHECK(!ec) << "Failed to create scratch directory: " << scratchDirectory();
-}
+ buildtimesPerObservationCount_(kDefaultBuildtimesPerObservationCount) {}
Benchmark::Benchmark(const std::string& name, std::unique_ptr<llvm::LLVMContext> context,
std::unique_ptr<llvm::Module> module,
@@ -155,16 +169,12 @@ Benchmark::Benchmark(const std::string& name, std::unique_ptr<llvm::LLVMContext>
const BaselineCosts& baselineCosts)
: context_(std::move(context)),
module_(std::move(module)),
- scratchDirectory_(fs::path(fs::unique_path(workingDirectory / "scratch-%%%%-%%%%"))),
+ scratchDirectory_(createScratchDirectoryOrDie()),
dynamicConfigProto_(dynamicConfig),
dynamicConfig_(realizeDynamicConfig(dynamicConfig, scratchDirectory_)),
baselineCosts_(baselineCosts),
name_(name),
- needsRecompile_(true) {
- sys::error_code ec;
- fs::create_directory(scratchDirectory(), ec);
- CHECK(!ec) << "Failed to create scratch directory: " << scratchDirectory().string();
-}
+ needsRecompile_(true) {}
void Benchmark::close() {
sys::error_code ec;
diff --git a/compiler_gym/envs/llvm/service/BenchmarkFactory.cc b/compiler_gym/envs/llvm/service/BenchmarkFactory.cc
index 3ad73da77..4da5984a0 100644
--- a/compiler_gym/envs/llvm/service/BenchmarkFactory.cc
+++ b/compiler_gym/envs/llvm/service/BenchmarkFactory.cc
@@ -38,6 +38,15 @@ BenchmarkFactory::BenchmarkFactory(const boost::filesystem::path& workingDirecto
VLOG(2) << "BenchmarkFactory initialized";
}
+BenchmarkFactory::~BenchmarkFactory() { close(); }
+
+void BenchmarkFactory::close() {
+ VLOG(2) << "BenchmarkFactory closing with " << benchmarks_.size() << " entries";
+ for (auto& entry : benchmarks_) {
+ entry.second.close();
+ }
+}
+
Status BenchmarkFactory::getBenchmark(const BenchmarkProto& benchmarkMessage,
std::unique_ptr<Benchmark>* benchmark) {
// Check if the benchmark has already been loaded into memory.
diff --git a/compiler_gym/envs/llvm/service/BenchmarkFactory.h b/compiler_gym/envs/llvm/service/BenchmarkFactory.h
index bd30974bd..d057785ab 100644
--- a/compiler_gym/envs/llvm/service/BenchmarkFactory.h
+++ b/compiler_gym/envs/llvm/service/BenchmarkFactory.h
@@ -62,6 +62,10 @@ class BenchmarkFactory {
return instance;
}
+ ~BenchmarkFactory();
+
+ void close();
+
/**
* Get the requested named benchmark.
*
diff --git a/compiler_gym/util/BUILD b/compiler_gym/util/BUILD
index 6c1ecd42c..a9cad9397 100644
--- a/compiler_gym/util/BUILD
+++ b/compiler_gym/util/BUILD
@@ -68,6 +68,7 @@ cc_library(
visibility = ["//visibility:public"],
deps = [
"@boost//:filesystem",
+ "@fmt",
],
)
diff --git a/compiler_gym/util/RunfilesPath.cc b/compiler_gym/util/RunfilesPath.cc
index 8e6ebf888..5183b62f9 100644
--- a/compiler_gym/util/RunfilesPath.cc
+++ b/compiler_gym/util/RunfilesPath.cc
@@ -2,16 +2,24 @@
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
+#include <fmt/format.h>
+
#include "boost/filesystem.hpp"
namespace fs = boost::filesystem;
namespace compiler_gym::util {
-// When running under bazel, the working directory is the root of the
-// CompilerGym workspace. Back up one level so that we can reference other
-// workspaces.
-static const std::string kDefaultBase{"."};
+namespace {
+
+static const char* UNKNOWN_USER_NAME = "unknown";
+
+inline std::string getUser() {
+ const char* base = std::getenv("USER");
+ return base ? base : UNKNOWN_USER_NAME;
+}
+
+} // namespace
fs::path getRunfilesPath(const std::string& relPath) {
const char* base = std::getenv("COMPILER_GYM_RUNFILES");
@@ -37,7 +45,26 @@ fs::path getSiteDataPath(const std::string& relPath) {
} else {
// $HOME may not be set under testing conditions. In this case, use a
// throwaway directory.
- return fs::temp_directory_path() / "compiler_gym" / relPath;
+ return fs::temp_directory_path() / fmt::format("compiler_gym_{}", getUser()) / relPath;
+ }
+}
+
+fs::path getCacheRootPath() {
+ // NOTE(cummins): This function has a related implementation in the Python
+ // sources, compiler_gym.util.runfiles_path.get_cache_path(). Any change to
+ // behavior here must be reflected in the Python version.
+ const char* force = std::getenv("COMPILER_GYM_CACHE");
+ if (force) {
+ return fs::path(force);
+ }
+
+ const char* home = std::getenv("HOME");
+ if (home) {
+ return fs::path(home) / ".local/cache/compiler_gym";
+ } else {
+ // $HOME may not be set under testing conditions. In this case, use a
+ // throwaway directory.
+ return fs::temp_directory_path() / fmt::format("compiler_gym_{}", getUser());
}
}
diff --git a/compiler_gym/util/RunfilesPath.h b/compiler_gym/util/RunfilesPath.h
index f2308edd2..e38fe7907 100644
--- a/compiler_gym/util/RunfilesPath.h
+++ b/compiler_gym/util/RunfilesPath.h
@@ -22,4 +22,11 @@ boost::filesystem::path getRunfilesPath(const std::string& relPath);
*/
boost::filesystem::path getSiteDataPath(const std::string& relPath);
+/**
+ * Resolve the root of the cache path.
+ *
+ * @return boost::filesystem::path A path.
+ */
+boost::filesystem::path getCacheRootPath();
+
} // namespace compiler_gym::util
| [llvm] "./a.out: Permission denied" error for runtime observations in Google Colaboratory
## 🐛 Bug
I'm not sure if this is a security feature of colab (preventing execution of arbitrary binaries) or a problem that we can fix in CompilerGym.
## To Reproduce
```py
import compiler_gym
env = compiler_gym.make("llvm-v0")
env.reset()
env.observation.Runtime()
```
[Demo notebook](https://colab.research.google.com/drive/1m-W3Qy0swiD8P4YvpfX6SP6UYSFWVhnH?usp=sharing).
## Environment
Please fill in this checklist:
- CompilerGym: v0.2.0
- How you installed CompilerGym (conda, pip, source): pip
| I was able to reproduce a similar issue in a docker container, and it turned out the problem was that executables in `/dev/shm` are not executable. It would be worth setting a different transient cache and seeing if the problem still holds in colab.
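Concretely, a hedged sketch of one way to try that, relying on the `COMPILER_GYM_CACHE` override referenced in the patch above; the path below is just an example and I have not verified it end-to-end in colab:

```python
import os

# Point the transient cache at a location that allows executing binaries
# (unlike /dev/shm in some sandboxes). Set it before importing compiler_gym.
os.environ["COMPILER_GYM_CACHE"] = "/tmp/compiler_gym_cache"

import compiler_gym

env = compiler_gym.make("llvm-v0")
env.reset()
print(env.observation.Runtime())
```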
Cheers,
Chris | 2021-12-09T23:07:41 | 0.0 | [] | [] |
||
embeddings-benchmark/mteb | embeddings-benchmark__mteb-1414 | cdb92c659bb7d3d0402c88c9e9c69f2d71b221e1 | diff --git a/mteb/models/evaclip_models.py b/mteb/models/evaclip_models.py
index 015c965c0..2a98277d0 100644
--- a/mteb/models/evaclip_models.py
+++ b/mteb/models/evaclip_models.py
@@ -89,7 +89,7 @@ def get_image_embeddings(
for i in tqdm(range(0, len(images), batch_size)):
batch_images = images[i : i + batch_size]
inputs = torch.vstack(
- [self.img_preprocess(b) for b in batch_images]
+ [self.img_preprocess(b).unsqueeze(0) for b in batch_images]
)
image_outputs = self.model.encode_image(inputs.to(self.device))
all_image_embeddings.append(image_outputs.cpu())
| [mieb] EVA on CVBenchCount fails
Maybe same as https://github.com/embeddings-benchmark/mteb/issues/1393
```
with torch.no_grad(), torch.cuda.amp.autocast():
0%| | 0/788 [00:00<?, ?it/s]
ERROR:mteb.evaluation.MTEB:Error while evaluating CVBenchCount: not enough values to unpack (expected 4, got
3)
Traceback (most recent call last):
File "/data/niklas/mieb/mteb/scripts/run_mieb.py", line 90, in <module>
results = evaluation.run(model, output_folder="/data/niklas/mieb/results-mieb-final", batch_size=1)
File "/data/niklas/mieb/mteb/mteb/evaluation/MTEB.py", line 464, in run
raise e
File "/data/niklas/mieb/mteb/mteb/evaluation/MTEB.py", line 425, in run
results, tick, tock = self._run_eval(
File "/data/niklas/mieb/mteb/mteb/evaluation/MTEB.py", line 300, in _run_eval
results = task.evaluate(
File "/data/niklas/mieb/mteb/mteb/abstasks/AbsTask.py", line 126, in evaluate
scores[hf_subset] = self._evaluate_subset(
File "/data/niklas/mieb/mteb/mteb/abstasks/Image/AbsTaskAny2TextMultipleChoice.py", line 62, in _evaluate_subset
scores = evaluator(model, encode_kwargs=encode_kwargs)
File "/data/niklas/mieb/mteb/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py", line 78, in __call__
query_embeddings = model.get_fused_embeddings(
File "/data/niklas/mieb/mteb/mteb/models/evaclip_models.py", line 128, in get_fused_embeddings
image_embeddings = self.get_image_embeddings(images, batch_size)
File "/data/niklas/mieb/mteb/mteb/models/evaclip_models.py", line 94, in get_image_embeddings
image_outputs = self.model.encode_image(inputs.to(self.device))
File "/data/niklas/mieb/mteb/EVA/EVA-CLIP/rei/eva_clip/model.py", line 302, in encode_image
features = self.visual(image)
File "/env/lib/conda/gritkto4/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/env/lib/conda/gritkto4/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/data/niklas/mieb/mteb/EVA/EVA-CLIP/rei/eva_clip/eva_vit_model.py", line 529, in forward
x = self.forward_features(x)
File "/data/niklas/mieb/mteb/EVA/EVA-CLIP/rei/eva_clip/eva_vit_model.py", line 491, in forward_features
x = self.patch_embed(x)
File "/env/lib/conda/gritkto4/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/env/lib/conda/gritkto4/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/data/niklas/mieb/mteb/EVA/EVA-CLIP/rei/eva_clip/eva_vit_model.py", line 321, in forward
B, C, H, W = x.shape
ValueError: not enough values to unpack (expected 4, got 3)
```
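In case it helps, a tiny hedged sketch of what the one-line change in the diff above addresses: a single preprocessed image is `(C, H, W)`, so `torch.vstack` without a batch dimension produces a 3-D tensor, which is what later breaks `B, C, H, W = x.shape`. The tensor sizes below are placeholders.

```python
import torch

# Stand-ins for img_preprocess outputs: each image is (C, H, W).
imgs = [torch.rand(3, 224, 224) for _ in range(2)]

# Without a batch dim, vstack concatenates along the channel axis -> (6, 224, 224).
print(torch.vstack(imgs).shape)

# With .unsqueeze(0) each image becomes (1, C, H, W) and vstack yields (2, 3, 224, 224).
print(torch.vstack([img.unsqueeze(0) for img in imgs]).shape)
```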
| I think this is the same as #1393 | 2024-11-08T15:12:28 | 0.0 | [] | [] |
||
ARISE-Initiative/robosuite | ARISE-Initiative__robosuite-389 | fc03c5f13ff3a6241cb85c7d2c50ccaef5ea8cd8 | diff --git a/robosuite/__init__.py b/robosuite/__init__.py
index 7d02e93c7b..6ec977bccb 100644
--- a/robosuite/__init__.py
+++ b/robosuite/__init__.py
@@ -1,14 +1,3 @@
-from robosuite.utils.log_utils import ROBOSUITE_DEFAULT_LOGGER
-
-try:
- from robosuite.macros_private import *
-except ImportError:
- import robosuite
-
- ROBOSUITE_DEFAULT_LOGGER.warn("No private macro file found!")
- ROBOSUITE_DEFAULT_LOGGER.warn("It is recommended to use a private macro file")
- ROBOSUITE_DEFAULT_LOGGER.warn("To setup, run: python {}/scripts/setup_macros.py".format(robosuite.__path__[0]))
-
from robosuite.environments.base import make
# Manipulation environments
diff --git a/robosuite/devices/keyboard.py b/robosuite/devices/keyboard.py
index 76dd71f22f..fb37648d61 100644
--- a/robosuite/devices/keyboard.py
+++ b/robosuite/devices/keyboard.py
@@ -54,7 +54,6 @@ def print_command(char, info):
print_command("z-x", "rotate arm about x-axis")
print_command("t-g", "rotate arm about y-axis")
print_command("c-v", "rotate arm about z-axis")
- print_command("ESC", "quit")
print("")
def _reset_internal_state(self):
diff --git a/robosuite/devices/spacemouse.py b/robosuite/devices/spacemouse.py
index ae0e41f203..604989ff28 100644
--- a/robosuite/devices/spacemouse.py
+++ b/robosuite/devices/spacemouse.py
@@ -120,8 +120,10 @@ def __init__(
):
print("Opening SpaceMouse device")
+ self.vendor_id = vendor_id
+ self.product_id = product_id
self.device = hid.device()
- self.device.open(vendor_id, product_id) # SpaceMouse
+ self.device.open(self.vendor_id, self.product_id) # SpaceMouse
self.pos_sensitivity = pos_sensitivity
self.rot_sensitivity = rot_sensitivity
@@ -164,7 +166,6 @@ def print_command(char, info):
print_command("Move mouse laterally", "move arm horizontally in x-y plane")
print_command("Move mouse vertically", "move arm vertically")
print_command("Twist mouse about an axis", "rotate arm about a corresponding axis")
- print_command("ESC", "quit")
print("")
def _reset_internal_state(self):
@@ -223,7 +224,7 @@ def run(self):
d = self.device.read(13)
if d is not None and self._enabled:
- if macros.SPACEMOUSE_PRODUCT_ID == 50741:
+ if self.product_id == 50741:
## logic for older spacemouse model
if d[0] == 1: ## readings from 6-DoF sensor
diff --git a/robosuite/macros.py b/robosuite/macros.py
index 76f26a721d..c49c84b6a2 100644
--- a/robosuite/macros.py
+++ b/robosuite/macros.py
@@ -36,9 +36,20 @@
# Spacemouse settings. Used by SpaceMouse class in robosuite/devices/spacemouse.py
SPACEMOUSE_VENDOR_ID = 9583
-SPACEMOUSE_PRODUCT_ID = 50735
+SPACEMOUSE_PRODUCT_ID = 50734
# If LOGGING LEVEL is set to None, the logger will be turned off
CONSOLE_LOGGING_LEVEL = "WARN"
# File logging is written to /tmp/robosuite_{time}_{pid}.log by default
FILE_LOGGING_LEVEL = None
+
+# Override with macros from macros_private.py file, if it exists
+try:
+ from robosuite.macros_private import *
+except ImportError:
+ import robosuite
+ from robosuite.utils.log_utils import ROBOSUITE_DEFAULT_LOGGER
+
+ ROBOSUITE_DEFAULT_LOGGER.warn("No private macro file found!")
+ ROBOSUITE_DEFAULT_LOGGER.warn("It is recommended to use a private macro file")
+ ROBOSUITE_DEFAULT_LOGGER.warn("To setup, run: python {}/scripts/setup_macros.py".format(robosuite.__path__[0]))
| SpaceMouse class seems to have bugs
## Environments
- Ubuntu 22.04 with open-source Linux drivers for 3Dconnexion devices
- Using a SpaceMouse Compact, which is detected by hid.enumerate().
- Robosuite installed in a Conda virtual environment
- Custom macro setup with correct device and vendor IDs
## Bug description
I am using a SpaceMouse Compact and encountered the following issue.
When I run `robosuite/robosuite/demos/demo_device_control.py`, hid fails to open my SpaceMouse Compact even if I set device and vendor IDs correctly in `macros_private.py`. This is weird because I was able to manually open it by `device = hid.device(); device.open(9583, 50741)`.
It turned out that the SpaceMouse class uses the default values of `vendor_id=macros.SPACEMOUSE_VENDOR_ID` and `product_id=macros.SPACEMOUSE_PRODUCT_ID` imported from `macros.py` instead of `macros_private.py`. If this is intended behavior, it's fine. However, it is a bit weird to me that robosuite asks users to set up `macros_private.py` but doesn't use it internally.
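For what it's worth, the macros side could be handled by letting `macros.py` itself pull in the private overrides, roughly as the diff above ends up doing; this is only a sketch (the real change also logs a warning when no private file is found):

```python
# at the bottom of robosuite/macros.py
try:
    from robosuite.macros_private import *  # noqa: F401,F403
except ImportError:
    pass  # fall back to the default macro values defined above
```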
Other lines that seem to contain a bug are the following:
```python
while True:
d = self.device.read(13)
if d is not None and self._enabled:
if macros.SPACEMOUSE_PRODUCT_ID == 50741:
## logic for older spacemouse model
```
These lines check whether `SPACEMOUSE_PRODUCT_ID` is 50741, but if the SpaceMouse class is instantiated with `product_id=50741` without specifying `SPACEMOUSE_PRODUCT_ID` in `macros.py`, these lines should check whether the provided `product_id` was 50741.
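To make the suggestion concrete, a trimmed-down hypothetical sketch of the fix I have in mind (mirroring what the diff above does): remember the ids the device was opened with and compare against those instead of the module-level macro.

```python
import hid
import robosuite.macros as macros


class SpaceMouseSketch:
    """Hypothetical, trimmed-down stand-in for the relevant parts of SpaceMouse."""

    def __init__(self, vendor_id=macros.SPACEMOUSE_VENDOR_ID,
                 product_id=macros.SPACEMOUSE_PRODUCT_ID):
        # Keep the ids that were actually used to open the device.
        self.vendor_id = vendor_id
        self.product_id = product_id
        self.device = hid.device()
        self.device.open(self.vendor_id, self.product_id)

    def poll_once(self):
        d = self.device.read(13)
        if d is not None:
            # Compare against the id of *this* device, not the macro.
            if self.product_id == 50741:
                pass  # logic for the older spacemouse model
        return d
```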
If these are indeed bugs, I will submit a PR to fix them.
| Also, another thing that seems to be a bug is that in both the spacemouse and keyboard control cases, pressing ESC does not end the simulation. Indeed, the code does not seem to detect whether the ESC key is pressed or not.
If you want me to fix this, including the issue above, please let me know whether these behaviors are intended or indeed bugs. I can't do anything otherwise.
I found [robosuite/devices/spacemouse.py](https://github.com/ARISE-Initiative/robosuite/blob/master/robosuite/devices/spacemouse.py) also uses `import robosuite.macros as macros`, which loads the default `macros.py` instead of `private_macros.py`. Maybe the same issue exists in other files too? | 2023-04-07T20:33:53 | 0.0 | [] | [] |
||
iglu-contest/iglu | iglu-contest__iglu-16 | c7fb7faf253a57c31da223b922449d638768dc28 | diff --git a/iglu/env.py b/iglu/env.py
index a545c2e..2d900f0 100644
--- a/iglu/env.py
+++ b/iglu/env.py
@@ -37,7 +37,7 @@
class IGLUEnv(_SingleAgentEnv):
def __init__(
- self, *args, max_steps=500, resolution=(64, 64),
+ self, *args, max_steps=500, resolution=(64, 64),
start_position=(0.5, GROUND_LEVEL + 1, 0.5, 0., -90),
initial_blocks=None,
bound_agent=True, action_space='human-level', **kwargs
@@ -63,7 +63,7 @@ def resolution(self):
def resolution(self, val):
self._resolution = val
self.task.resolution = val
-
+
@property
def bound_agent(self):
return self._bound_agent
@@ -109,7 +109,7 @@ def action_space(self):
elif self.action_space_type == 'human-level':
flatten_func = lambda a: (a, (lambda x: x))
self.action_space_, self.unflatten_action = flatten_func(self.action_space_)
-
+
return self.action_space_
@action_space.setter
@@ -120,7 +120,7 @@ def action_space(self, new_space):
k: v for k, v in self.action_space_.spaces.items()
if k != 'fake_reset'
})
-
+
def _init_tasks(self):
self.spec._kwargs['env_spec'].task_monitor.tasks = self._tasks
@@ -201,8 +201,8 @@ def step(self, action):
class IGLUEnvSpec(SimpleEmbodimentEnvSpec):
ENTRYPOINT = 'iglu.env:IGLUEnv'
def __init__(
- self, *args,
- iglu_evaluation=False, resolution=(64, 64),
+ self, *args,
+ iglu_evaluation=False, resolution=(64, 64),
start_position=(0.5, GROUND_LEVEL + 1, 0.5, 0, -90),
initial_blocks=None,
bound_agent=True, ation_space='human-level', **kwargs
@@ -292,7 +292,7 @@ def create_observables(self) -> List[Handler]:
AgentPosObservation(),
handlers.CompassObservation(),
HotBarObservation(6),
- RayObservation(),
+ #RayObservation(),
ChatObservation(self.task_monitor),
GridObservation(
grid_name='build_zone',
@@ -340,7 +340,7 @@ def discrete_actions(self):
HotBarChoiceAction(6),
FakeResetAction(),
]
-
+
def flatten_discrete_actions(self, action_space):
sps = action_space.spaces
new_space = Dict({
| For some reason, I cannot run the program on Ubuntu 20.04 with CUDA 11.4
Greetings.
My environment is
Ubuntu 20.04.3 LTS
RTX 3090
NVIDIA-SMI 470.86 Driver Version: 470.86 CUDA Version: 11.4 cudnn8
I tried to run this program on my local machine, and then I encountered a KeyError:
``` bash
(iglu_env437) ➜  iglu git:(master) ✗ python test/test_env.py
/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/gym/logger.py:34: UserWarning: WARN: Box bound precision lowered by casting to float32
warnings.warn(colorize("%s: %s" % ("WARN", msg % args), "yellow"))
/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/gym/logger.py:34: UserWarning: WARN: Box bound precision lowered by casting to float32
warnings.warn(colorize("%s: %s" % ("WARN", msg % args), "yellow"))
action space: Dict(attack:Discrete(2), back:Discrete(2), camera:Box(low=-180.0, high=180.0, shape=(2,)), forward:Discrete(2), hotbar:Discrete(7), jump:Discrete(2), left:Discrete(2), right:Discrete(2), use:Discrete(2))
observation space: Dict(agentPos:Box(low=-180.0, high=180.0, shape=(5,)), chat:<iglu.handlers.String object at 0x7fb573945890>, compass:Dict(angle:Box(low=-180.0, high=180.0, shape=())), grid:Box(low=0.0, high=6.0, shape=(9, 11, 11)), inventory:Box(low=0.0, high=20.0, shape=(6,)), pov:Box(low=0, high=255, shape=(64, 64, 3)), ray:<iglu.handlers.String object at 0x7fb573c09f50>)
current task: None
/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/gym/logger.py:34: UserWarning: WARN: Box bound precision lowered by casting to float32
warnings.warn(colorize("%s: %s" % ("WARN", msg % args), "yellow"))
/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/gym/logger.py:34: UserWarning: WARN: Box bound precision lowered by casting to float32
warnings.warn(colorize("%s: %s" % ("WARN", msg % args), "yellow"))
/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/runpy.py:125: RuntimeWarning: 'minerl_patched.utils.process_watcher' found in sys.modules after import of package 'minerl_patched.utils', but prior to execution of 'minerl_patched.utils.process_watcher'; this may result in unpredictable behaviour
warn(RuntimeWarning(msg))
72it [00:00, 125.10it/s]No pitch observation! Yielding default value 0.0 for pitch
No xpos observation! Yielding default value 0.0 for xpos
No yaw observation! Yielding default value 0.0 for yaw
No ypos observation! Yielding default value 0.0 for ypos
No zpos observation! Yielding default value 0.0 for zpos
Traceback (most recent call last):
File "test/test_env.py", line 24, in <module>
obs, reward, done, info = env.step(action)
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/gym/wrappers/time_limit.py", line 18, in step
observation, reward, done, info = self.env.step(action)
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/iglu-0.2.2-py3.7.egg/iglu/env.py", line 185, in step
obs, reward, done, info = super().step(action)
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/env/_singleagent.py", line 32, in step
obs, rew, done, info = super().step(multi_agent_action)
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/env/_multiagent.py", line 308, in step
out_obs, monitor = self._process_observation(actor_name, obs, _malmo_json)
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/env/_multiagent.py", line 217, in _process_observation
obs_dict[h.to_string()] = h.from_hero(info)
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/hero/handlers/translation.py", line 126, in from_hero
for h in self.handlers
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/hero/handlers/translation.py", line 126, in <dictcomp>
for h in self.handlers
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/hero/handlers/translation.py", line 91, in from_hero
return self.walk_dict(hero_dict, self.hero_keys)
File "/home/sukai/anaconda3/envs/iglu_env437/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/hero/handlers/translation.py", line 80, in walk_dict
raise KeyError()
KeyError
```
Not sure what caused this.
But I can successfully run the program in a docker container. I think the only difference is that the container is based on nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04.
| Hi,
A new error message came out when I tried to build the Docker image manually:
``` bash
➜  iglu git:(master) ✗ docker run --network host --rm -it -v $(pwd):/root/iglu_dev iglu_env python iglu_dev/test/test_env.py
Traceback (most recent call last):
File "iglu_dev/test/test_env.py", line 2, in <module>
import minerl_patched
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/__init__.py", line 7, in <module>
import minerl_patched.herobraine.envs
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/envs.py", line 19, in <module>
MINERL_TREECHOP_V0 = Treechop()
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/env_specs/treechop_specs.py", line 44, in __init__
**kwargs)
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/env_specs/simple_embodiment.py", line 34, in __init__
super().__init__(name, *args, **kwargs)
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/env_spec.py", line 38, in __init__
self.reset()
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/env_spec.py", line 44, in reset
self.actionables = self.create_actionables()
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/env_specs/simple_embodiment.py", line 47, in create_actionables
handlers.KeybasedCommandAction(k, v) for k, v in INVERSE_KEYMAP.items()
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/env_specs/simple_embodiment.py", line 48, in <listcomp>
if k in SIMPLE_KEYBOARD_ACTION
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/hero/handlers/agent/actions/keyboard.py", line 60, in __init__
super().__init__(command, spaces.Discrete(len(keys) + 1))
File "/root/miniconda/envs/py37/lib/python3.7/site-packages/minerl_patched-0.1.0-py3.7-linux-x86_64.egg/minerl_patched/herobraine/hero/spaces.py", line 206, in __init__
self.shape = ()
AttributeError: can't set attribute
```
So this means that it's not about the OS or CUDA; I think something is wrong with the current dependencies.
Hi, wrt the second error, make sure you are using `gym==0.18.3`.
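For anyone else hitting this, a quick hedged way to confirm which gym version is actually active; the `AttributeError` above is consistent with newer gym releases where `Space.shape` became a read-only property, while 0.18.3 still allows the plain attribute assignment that `minerl_patched` performs:

```python
import gym

print(gym.__version__)
assert gym.__version__ == "0.18.3", "downgrade with: pip install gym==0.18.3"
```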
> Hi, wrt the second error, make sure you are using `gym==0.18.3`.
yes, only the first error remained after I switched to ```gym==0.18.3``` | 2021-12-13T17:51:19 | 0.0 | [] | [] |
||
InstaPy/InstaPy | InstaPy__InstaPy-6097 | b573976e123dd2e4fac8677d8d85ecce33c6c38c | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0a3df8195..f65c5a222 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,7 +15,9 @@ _The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
- Added second `accept_igcookie_dialogue` to handled the second "cookie accept screen" that is not automatically accepted
### Fixed
+
- Move call to `get_following_status` above `web_address_navigator` inside `get_links_for_username` function
+- Fixed the correct URL `displaypurposes.com`, the old `displaypurposes.com` API is deprecated
## [0.6.13] - 2020-12-30
diff --git a/instapy/instapy.py b/instapy/instapy.py
index ba1aac76b..094cd531d 100644
--- a/instapy/instapy.py
+++ b/instapy/instapy.py
@@ -774,7 +774,7 @@ def set_smart_hashtags(
def set_smart_location_hashtags(
self, locations: list, radius: int = 10, limit: int = 3, log_tags: bool = True
):
- """Generate smart hashtags based on https://displaypurposes.com/map"""
+ """Generate smart hashtags based on https://apidisplaypurposes.com/map"""
if locations is None:
self.logger.error("set_smart_location_hashtags is misconfigured")
return self
@@ -792,7 +792,7 @@ def set_smart_location_hashtags(
bbox["lat_max"],
radius,
)
- url = "https://displaypurposes.com/local/?bbox={}".format(bbox_url)
+ url = "https://apidisplaypurposes.com/local/?bbox={}".format(bbox_url)
req = requests.get(url)
data = json.loads(req.text)
| set_smart_location_hashtags crashes
set_smart_location_hashtags crashes when using the code in the tutorial

Throws an error
```
---------------------------------------------------------------------------
JSONDecodeError Traceback (most recent call last)
<ipython-input-18-71f011c24cbe> in <module>
1 # session.set_smart_hashtags(['cycling', 'roadbike'], limit=3, sort='top', log_tags=True)
2
----> 3 session.set_smart_location_hashtags(['204517928/chicago-illinois', '213570652/nagoya-shi-aichi-japan'], radius=20, limit=10)
4 session.like_by_tags(['soccer', 'cr7', 'neymar'], amount=10, use_smart_location_hashtags=True)
5
/usr/local/lib/python3.8/site-packages/instapy/instapy.py in set_smart_location_hashtags(self, locations, radius, limit, log_tags)
784
785 req = requests.get(url)
--> 786 data = json.loads(req.text)
787 if int(data["count"]) == 0:
788 self.logger.warning("Too few results for {} location".format(location))
/usr/local/lib/python3.8/json/__init__.py in loads(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
355 parse_int is None and parse_float is None and
356 parse_constant is None and object_pairs_hook is None and not kw):
--> 357 return _default_decoder.decode(s)
358 if cls is None:
359 cls = JSONDecoder
/usr/local/lib/python3.8/json/decoder.py in decode(self, s, _w)
335
336 """
--> 337 obj, end = self.raw_decode(s, idx=_w(s, 0).end())
338 end = _w(s, end).end()
339 if end != len(s):
/usr/local/lib/python3.8/json/decoder.py in raw_decode(self, s, idx)
353 obj, end = self.scan_once(s, idx)
354 except StopIteration as err:
--> 355 raise JSONDecodeError("Expecting value", s, err.value) from None
356 return obj, end
JSONDecodeError: Expecting value: line 1 column 1 (char 0)
[6]
```
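For reference, a hedged sketch of the request the patched code makes: the old `displaypurposes.com/local` endpoint no longer returns JSON (which is what surfaces as the `JSONDecodeError` above), while the `apidisplaypurposes.com` URL from the diff does. The bbox values and their ordering below are placeholders for illustration only.

```python
import json
import requests

# Placeholder bounding box + radius; real values come from the location lookup.
bbox_url = "136.8,35.0,137.1,35.3,20"

url = "https://apidisplaypurposes.com/local/?bbox={}".format(bbox_url)
req = requests.get(url)
data = json.loads(req.text)
print(data.get("count"))
```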
| 2021-02-27T20:04:52 | 0.0 | [] | [] |
|||
angr/angr-management | angr__angr-management-417 | 2583824081368f157e5ae1690783d8af0fbf9a7b | diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 000000000..31d827588
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "python.linting.pylintEnabled": true
+}
diff --git a/angrmanagement/plugins/chess_manager/diagnose_handler.py b/angrmanagement/plugins/chess_manager/diagnose_handler.py
index b39a5f0f2..e663e6332 100644
--- a/angrmanagement/plugins/chess_manager/diagnose_handler.py
+++ b/angrmanagement/plugins/chess_manager/diagnose_handler.py
@@ -8,6 +8,7 @@
from typing import Optional
from getmac import get_mac_address as gma
+from sqlalchemy.exc import OperationalError
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
from angrmanagement.config import Conf
@@ -22,15 +23,16 @@
l = logging.getLogger(__name__)
l.setLevel('INFO')
-user_dir = os.path.expanduser('~')
-log_dir = os.path.join(user_dir, 'am-logging')
-os.makedirs(log_dir, exist_ok=True)
-log_file = os.path.join(log_dir, 'poi_diagnose.log')
-fh = logging.FileHandler(log_file)
-fh.setLevel('INFO')
-formatter: Formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-fh.setFormatter(formatter)
-l.addHandler(fh)
+def _init_logger():
+ user_dir = os.path.expanduser('~')
+ log_dir = os.path.join(user_dir, 'am-logging')
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, 'poi_diagnose.log')
+ fh = logging.FileHandler(log_file)
+ fh.setLevel('INFO')
+ formatter: Formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ l.addHandler(fh)
class DiagnoseHandler:
@@ -38,6 +40,8 @@ class DiagnoseHandler:
Handling POI records in slacrs
"""
def __init__(self, project_name=None, project_md5=None):
+ _init_logger()
+
self.project_name = project_name
self.project_md5 = project_md5
@@ -98,7 +102,7 @@ def get_pois(self):
try:
slacrs = Slacrs(database=Conf.checrs_backend_str)
session = slacrs.session()
- except Exception:
+ except OperationalError:
# Cannot connect
return None
@@ -125,7 +129,7 @@ def _commit_pois(self):
try:
slacrs = Slacrs(database=Conf.checrs_backend_str)
session = slacrs.session()
- except Exception:
+ except OperationalError:
l.error("Failed to CHECRS backend. Try again later...")
continue
diff --git a/angrmanagement/plugins/log_human_activities/log_human_activities.py b/angrmanagement/plugins/log_human_activities/log_human_activities.py
index 9f69f7fad..508a44982 100644
--- a/angrmanagement/plugins/log_human_activities/log_human_activities.py
+++ b/angrmanagement/plugins/log_human_activities/log_human_activities.py
@@ -13,15 +13,6 @@
l = logging.getLogger(__name__)
l.setLevel('INFO')
-user_dir = os.path.expanduser('~')
-log_dir = os.path.join(user_dir, "am-logging")
-os.makedirs(log_dir, exist_ok=True)
-log_file = os.path.join(log_dir, 'human_activities.log')
-fh = logging.FileHandler(log_file)
-fh.setLevel('INFO')
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-fh.setFormatter(formatter)
-l.addHandler(fh)
try:
from slacrs import Slacrs
@@ -38,6 +29,7 @@ def __init__(self, *args, **kwargs):
if not Slacrs:
raise Exception("Skipping LogHumanActivities Plugin. Please install Slacrs.")
super().__init__(*args, **kwargs)
+ self._init_logger()
self.session = None
self.project_name = None
self.project_md5 = None
@@ -47,6 +39,17 @@ def __init__(self, *args, **kwargs):
self.slacrs_thread = None
self.slacrs = None
+ def _init_logger(self): # pylint:disable=no-self-use
+ user_dir = os.path.expanduser('~')
+ log_dir = os.path.join(user_dir, "am-logging")
+ os.makedirs(log_dir, exist_ok=True)
+ log_file = os.path.join(log_dir, 'human_activities.log')
+ fh = logging.FileHandler(log_file)
+ fh.setLevel('INFO')
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ l.addHandler(fh)
+
def on_workspace_initialized(self, workspace):
self.slacrs_thread = threading.Thread(target=self._commit_logs)
self.slacrs_thread.setDaemon(True)
| angr-management creates "am-logging" directory in user's home folder by default
| 2021-07-26T21:23:44 | 0.0 | [] | [] |
|||
lm-sys/FastChat | lm-sys__FastChat-1905 | 0103e1ea9eb3a73316d495bf6c4955be3a09f889 | diff --git a/docs/model_support.md b/docs/model_support.md
index 765c10c9a..b1cd72bfe 100644
--- a/docs/model_support.md
+++ b/docs/model_support.md
@@ -30,7 +30,11 @@
- [baichuan-inc/baichuan-7B](https://huggingface.co/baichuan-inc/baichuan-7B)
- [internlm/internlm-chat-7b](https://huggingface.co/internlm/internlm-chat-7b)
- Any [EleutherAI](https://huggingface.co/EleutherAI) pythia model such as [pythia-6.9b](https://huggingface.co/EleutherAI/pythia-6.9b)
-- Any [Peft](https://github.com/huggingface/peft) adapter trained ontop of a model above. To activate, must have `peft` in the model path.
+- Any [Peft](https://github.com/huggingface/peft) adapter trained on top of a
+ model above. To activate, must have `peft` in the model path. Note: If
+ loading multiple peft models, you can have them share the base model weights by
+ setting the environment variable `PEFT_SHARE_BASE_WEIGHTS=true` in any model
+ worker.
## How to support a new model
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 3418b3ab2..136d32c91 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -2,7 +2,7 @@
import math
import sys
-from typing import List, Optional
+from typing import Dict, List, Optional
import warnings
if sys.version_info >= (3, 9):
@@ -11,6 +11,7 @@
from functools import lru_cache as cache
import accelerate
+import os
import psutil
import torch
from transformers import (
@@ -35,6 +36,12 @@
)
from fastchat.utils import get_gpu_memory
+# Check an environment variable to check if we should be sharing Peft model
+# weights. When false we treat all Peft models as separate.
+peft_share_base_weights = (
+ os.environ.get("PEFT_SHARE_BASE_WEIGHTS", "false").lower() == "true"
+)
+
class BaseModelAdapter:
"""The base and the default model adapter."""
@@ -254,6 +261,33 @@ def get_generate_stream_function(model: torch.nn.Module, model_path: str):
return generate_stream_falcon
elif is_codet5p:
return generate_stream_codet5p
+ elif peft_share_base_weights and "peft" in model_path:
+ # Return a curried stream function that loads the right adapter
+ # according to the model_name available in this context. This ensures
+ # the right weights are available.
+ @torch.inference_mode()
+ def generate_stream_peft(
+ model,
+ tokenizer,
+ params: Dict,
+ device: str,
+ context_len: int,
+ stream_interval: int = 2,
+ judge_sent_end: bool = False,
+ ):
+ model.set_adapter(model_path)
+ for x in generate_stream(
+ model,
+ tokenizer,
+ params,
+ device,
+ context_len,
+ stream_interval,
+ judge_sent_end,
+ ):
+ yield x
+
+ return generate_stream_peft
else:
return generate_stream
@@ -331,6 +365,9 @@ def remove_parent_directory_name(model_path):
return model_path.split("/")[-1]
+peft_model_cache = {}
+
+
class PeftModelAdapter:
"""Loads any "peft" model and it's base model."""
@@ -349,12 +386,42 @@ def load_model(self, model_path: str, from_pretrained_kwargs: dict):
f"PeftModelAdapter cannot load a base model with 'peft' in the name: {config.base_model_name_or_path}"
)
+ # Basic proof of concept for loading peft adapters that share the base
+ # weights. This is pretty messy because Peft re-writes the underlying
+ # base model and internally stores a map of adapter layers.
+ # So, to make this work we:
+ # 1. Cache the first peft model loaded for a given base models.
+ # 2. Call `load_model` for any follow on Peft models.
+ # 3. Make sure we load the adapters by the model_path. Why? This is
+ # what's accessible during inference time.
+ # 4. In get_generate_stream_function, make sure we load the right
+ # adapter before doing inference. This *should* be safe when calls
+ # are blocked the same semaphore.
+ if peft_share_base_weights:
+ if base_model_path in peft_model_cache:
+ model, tokenizer = peft_model_cache[base_model_path]
+ # Super important: make sure we use model_path as the
+ # `adapter_name`.
+ model.load_adapter(model_path, adapter_name=model_path)
+ else:
+ base_adapter = get_model_adapter(base_model_path)
+ base_model, tokenizer = base_adapter.load_model(
+ base_model_path, from_pretrained_kwargs
+ )
+ # Super important: make sure we use model_path as the
+ # `adapter_name`.
+ model = PeftModel.from_pretrained(
+ base_model, model_path, adapter_name=model_path
+ )
+ peft_model_cache[base_model_path] = (model, tokenizer)
+ return model, tokenizer
+
+ # In the normal case, load up the base model weights again.
base_adapter = get_model_adapter(base_model_path)
base_model, tokenizer = base_adapter.load_model(
base_model_path, from_pretrained_kwargs
)
model = PeftModel.from_pretrained(base_model, model_path)
-
return model, tokenizer
def get_default_conv_template(self, model_path: str) -> Conversation:
| [Feature Request] Fork fastchat/serve/model_worker.py to support multiple LoRA models
Right now `fastchat/serve/model_worker.py` supports one model. With LoRA (or other PEFT options) trained adapters, we could in theory load one base model and multiple adapters per worker and reduce the amount of times we need to load the base model. `fastchat/serve/model_worker.py` is probably a bad place to do this as it would complicate things, but it looks like forking the script to something like `fastchat/serve/multi_model_worker.py` should be pretty easy.
New options would need to support a list of `model_path:model_name` pairs. A safe hard assumption would be that all listed models share the same base model and that it would only be loaded once. The worker can then register each LoRA model with the controller.
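For what it's worth, a rough hedged sketch of the peft mechanics this relies on: load the base weights once and attach several adapters to the same model. The model/adapter paths below are placeholders. This is also essentially what the `PEFT_SHARE_BASE_WEIGHTS=true` path in the diff above does inside `PeftModelAdapter`.

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_path = "path/to/base-model"      # placeholder
adapter_a = "path/to/peft-adapter-a"  # placeholder; must contain "peft" for FastChat
adapter_b = "path/to/peft-adapter-b"  # placeholder; must contain "peft" for FastChat

# Load the (large) base weights a single time.
base_model = AutoModelForCausalLM.from_pretrained(base_path)
tokenizer = AutoTokenizer.from_pretrained(base_path)

# Wrap with the first adapter, then attach additional adapters by name.
model = PeftModel.from_pretrained(base_model, adapter_a, adapter_name=adapter_a)
model.load_adapter(adapter_b, adapter_name=adapter_b)

# Before serving a request, activate the adapter matching the requested model.
model.set_adapter(adapter_b)
```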
Would a setup like this break anything within the controller? I'm assuming not; however, I haven't checked this directly.
If this seems reasonable, I'm happy to draft a PR.
| Note: A major Peft bug seems to block this from working right now: https://github.com/huggingface/peft/issues/430
Yes, this is definitely a feature we want.
cc @ZYHowell
Cool, then I can put it together, it's a high priority item for me. I think right now it won't be memory efficient since the base model can't be used by two different PeftModels, but I'll setup the basic server runner and find out what breaks. | 2023-07-09T10:07:26 | 0.0 | [] | [] |
||
aiortc/aiortc | aiortc__aiortc-900 | 9f14474c0953b90139c8697a216e4c2cd8ee5504 | diff --git a/src/aiortc/codecs/__init__.py b/src/aiortc/codecs/__init__.py
index a512939b9..b595a43cd 100644
--- a/src/aiortc/codecs/__init__.py
+++ b/src/aiortc/codecs/__init__.py
@@ -32,6 +32,9 @@
],
"video": [],
}
+# Note, the id space for these extensions is shared across media types when BUNDLE
+# is negotiated. If you add a audio- or video-specific extension, make sure it has
+# a unique id.
HEADER_EXTENSIONS: Dict[str, List[RTCRtpHeaderExtensionParameters]] = {
"audio": [
RTCRtpHeaderExtensionParameters(
@@ -46,7 +49,7 @@
id=1, uri="urn:ietf:params:rtp-hdrext:sdes:mid"
),
RTCRtpHeaderExtensionParameters(
- id=2, uri="http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time"
+ id=3, uri="http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time"
),
],
}
| 'Absolute Send Time' WebRTC extension bug
Hi,
I get the following warning upon the successful connection of the two peers (but in fact it is an error, since it completely prevents the tracks from being sent and received):
```
[WARNING][2022-06-28 13:00:29,863](72026)aiortc.rtcdtlstransport: RTCDtlsTransport(client) Traceback (most recent call last):
File "/home/bob/ws/modules/streaming-module/venv/lib/python3.8/site-packages/aiortc/rtcdtlstransport.py", line 531, in __run
await self._recv_next()
File "/home/bob/ws/modules/streaming-module/venv/lib/python3.8/site-packages/aiortc/rtcdtlstransport.py", line 630, in _recv_next
await self._handle_rtp_data(data, arrival_time_ms=arrival_time_ms)
File "/home/bob/ws/modules/streaming-module/venv/lib/python3.8/site-packages/aiortc/rtcdtlstransport.py", line 575, in _handle_rtp_data
packet = RtpPacket.parse(data, self._rtp_header_extensions_map)
File "/home/bob/ws/modules/streaming-module/venv/lib/python3.8/site-packages/aiortc/rtp.py", line 710, in parse
packet.extensions = extensions_map.get(extension_profile, extension_value)
File "/home/bob/ws/modules/streaming-module/venv/lib/python3.8/site-packages/aiortc/rtp.py", line 89, in get
values.abs_send_time = unpack("!L", b"\00" + x_value)[0]
struct.error: unpack requires a buffer of 4 bytes
```
I investigated a bit and I understood that is caused by the RTP extension "Absolute Send Time" ([https://webrtc.googlesource.com/src/+/refs/heads/main/docs/native-code/rtp-hdrext/abs-send-time](https://webrtc.googlesource.com/src/+/refs/heads/main/docs/native-code/rtp-hdrext/abs-send-time))
In the specification it is written that this extension is 4 bytes long, but the code in aiortc/rtp.py seems to ignore this and tries to read the extension value as it would for any other extension. Then this value is unpacked, but because it is too short, an exception from the struct package is thrown.
Do you think what I supposed here is right?
I can try patching it and sending you a first fix.
Have a great day
| Update:
Trying to fix it, I noticed that the logic in aiortc/rtp.py follows RFC 5285, and that this standard has been obsoleted by RFC 8285. I think this work is more of an update than a bug fix. Maybe I could try to update everything to the current RFC 8285, but it would take me a lot of time (and unfortunately I am very short on time at the moment...). I will let you know! Keep up the great work anyway!
@juberti any thoughts?
8285 is a fairly minor update to 5285 IIRC. So I'm not sure that that's the core issue here. Similarly, I would expect the extension parser to use the transmitted length rather than assuming any specific length for the extension.
Do you have a dump of a packet that the parser chokes on?
Yes, here are some packets that trigger the bug:
[rtp_abs_send_time_failed.zip](https://github.com/aiortc/aiortc/files/9008911/rtp_abs_send_time_failed.zip)
I think it is the biggest one that contains the invalid header (i.e. a video frame). I should have disabled TLS for the capture, but let me know if there is any problem.
By the way, I cannot understand the logic of packing/unpacking headers in rtp.py: why, after the magic word 0xBEDE, is the length in bytes of all the extensions appended rather than the number of extensions, as specified in RFC 5285? I think this could be the reason for the bug. Also, I noticed that in the broken packet the extension length has a value of "1", but it should be greater since the 'mid' and 'abs_send_time' extensions are both included (and that is why the bug is triggered: the unpack doesn't receive enough bytes).
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
Hi,
Have you had time to investigate this issue using the provided dumped packets?
Hi, is there something new to update regarding this one?
I experience the same thing when I try to receive the `rtp_data`...
I will look into this in the next week or two.
After I removed this `a=extmap:2 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\r\n` from the offer before I sent it to the master peer, I was able to stream media between the two peers. It looks fine - no delay and good sync of audio-video tracks.
@juberti do we have any updates?
Could this be related to #686? I am wondering if we are negotiating extension IDs correctly.
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
Still relevant.
I found the problem.
When there are two senders on the same transport, they both share the same header extensions map, which is wrong. If one sender is video and one audio, the header extensions map will, in my case, contain `abs_send_time=2, audio_level=2, mid=1`. The problem is that `abs_send_time` and `audio_level` both use id 2. Since `HeaderExtensionsMap.get()` compares the received id to `abs_send_time` before `audio_level`, it will decode the audio level, which is 1 byte, as an absolute send time, and hence crash because it needs 4 bytes, not 1.
If I change the id of abs-send-time to 3 in `codecs/__init__.py`, it all works.
So either we need unique header extensions maps per sender, or use unique ids (as a workaround as I see it).
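To make the failure mode concrete, a tiny hedged repro of the decode step quoted in the traceback above (`rtp.py` pads the 3-byte abs-send-time value to 4 bytes before `unpack`); the byte values here are arbitrary:

```python
from struct import unpack

# A genuine abs-send-time extension carries 3 bytes, so the 1-byte pad works:
abs_send_time = b"\x01\x02\x03"
print(unpack("!L", b"\x00" + abs_send_time)[0])

# But if the received value is really the 1-byte audio level (both negotiated
# with id=2), the same code only has 2 bytes and raises:
audio_level = b"\x7f"
unpack("!L", b"\x00" + audio_level)  # struct.error: unpack requires a buffer of 4 bytes
```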
@eerimoq, thanks for digging into this! I think your workaround would be a good short-term fix (and we can add a note to `codecs/__init__.py` to warn people about this for the future). | 2023-06-22T19:12:11 | 0.0 | [] | [] |
||
scottstanie/sentineleof | scottstanie__sentineleof-51 | efcf14c72bad2cb59d3ea543651e47b7f6f5987f | diff --git a/eof/cli.py b/eof/cli.py
index c363ee0..2479250 100644
--- a/eof/cli.py
+++ b/eof/cli.py
@@ -82,6 +82,11 @@
"--asf-password",
help="ASF password. If not provided the program asks for it",
)
[email protected](
+ "--ask-password",
+ is_flag=True,
+ help="ask for passwords interactively if needed",
+)
@click.option(
"--update-netrc",
is_flag=True,
@@ -100,6 +105,7 @@ def cli(
asf_password: str = "",
cdse_user: str = "",
cdse_password: str = "",
+ ask_password: bool = False,
update_netrc: bool = False,
):
"""Download Sentinel precise orbit files.
@@ -111,11 +117,12 @@ def cli(
With no arguments, searches current directory for Sentinel 1 products
"""
log._set_logger_handler(level=logging.DEBUG if debug else logging.INFO)
- dryrun = not update_netrc
- if not (asf_user and asf_password):
- asf_user, asf_password = setup_netrc(host=NASA_HOST, dryrun=dryrun)
- if not (cdse_user and cdse_password):
- cdse_user, cdse_password = setup_netrc(host=DATASPACE_HOST, dryrun=dryrun)
+ if ask_password:
+ dryrun = not update_netrc
+ if not force_asf and not (cdse_user and cdse_password):
+ cdse_user, cdse_password = setup_netrc(host=DATASPACE_HOST, dryrun=dryrun)
+ if not (cdse_user and cdse_password) and not (asf_user and asf_password):
+ asf_user, asf_password = setup_netrc(host=NASA_HOST, dryrun=dryrun)
download.main(
search_path=search_path,
| net Netrc/auth/login flow
@avalentino Opening this since I had someone on the MAAP project point out a few things with the changes.
1. Right now we're forcing the user to set up *both* an ASF login and a CDSE login:
```python
if not (asf_user and asf_password):
asf_user, asf_password = setup_netrc(host=NASA_HOST, dryrun=dryrun)
if not (cdse_user and cdse_password):
cdse_user, cdse_password = setup_netrc(host=DATASPACE_HOST, dryrun=dryrun)
```
I don't think we need people to have both of them set up. If someone always wants to download from CDSE, they should just be able to set that up and not get yelled at for not having an Earthdata login
2. Likewise, if someone only wants to get orbits through ASF, they should be able to ignore the CDSE ones. That's not happening now, even with `--force-asf`.
3. It's possible we just shouldn't ever do interactive logins, and instead print a big WARNING if we don't find a .netrc. It's still a little unexpected to me that entering the password now doesn't save it if you run `eof` again, but more importantly, there are cases where people have an auth token set up to download from ASF, and they don't want either an interactive login or a CDSE login prompt.
| I totally agree.
For CDSE we could add the following check:
```
if not force_asf and not (cdse_user and cdse_password):
cdse_user, cdse_password = setup_netrc(host=DATASPACE_HOST, dryrun=dryrun)
```
But probably what you suggest in point 3 is even better. This is what programs like sentinelsat do.
Maybe we could add a new flag that allows the user to specify when credentials shall be interactively asked (in the CLI).
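For illustration, a minimal hedged sketch of what such a flag could look like with click; it mirrors the `--ask-password` option the diff above ends up adding, and everything else here is a placeholder:

```python
import click


@click.command()
@click.option(
    "--ask-password",
    is_flag=True,
    help="ask for passwords interactively if needed",
)
def cli(ask_password: bool = False):
    # Only fall back to interactive prompts when explicitly requested.
    if ask_password:
        click.echo("would prompt for CDSE/ASF credentials here")


if __name__ == "__main__":
    cli()
```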
I will try to create a new PR ASAP. | 2023-11-06T06:58:36 | 0.0 | [] | [] |
||
DavidLandup0/deepvision | DavidLandup0__deepvision-85 | f1b8b49e79c0995a274668c3ca7feb152eef512b | diff --git a/deepvision/__init__.py b/deepvision/__init__.py
index 89cad88..f4e8f52 100644
--- a/deepvision/__init__.py
+++ b/deepvision/__init__.py
@@ -1,4 +1,4 @@
from deepvision import evaluation
from deepvision import models
-__version__ = "0.1.2"
+__version__ = "0.1.3"
diff --git a/deepvision/layers/fused_mbconv.py b/deepvision/layers/fused_mbconv.py
index 2d7d36e..a0ac1c2 100644
--- a/deepvision/layers/fused_mbconv.py
+++ b/deepvision/layers/fused_mbconv.py
@@ -325,7 +325,7 @@ def tf_to_pt(layer, dummy_input=None):
# Pass dummy input through to
# get variables under `layer.variables`
if dummy_input is None:
- dummy_input = tf.random.normal([1, 224, 224, layer.input_filters])
+ dummy_input = tf.random.normal([1, 32, 32, layer.input_filters])
layer(dummy_input)
pytorch_mbconv = __FusedMBConvPT(
diff --git a/deepvision/layers/mbconv.py b/deepvision/layers/mbconv.py
index f026603..86f0c99 100644
--- a/deepvision/layers/mbconv.py
+++ b/deepvision/layers/mbconv.py
@@ -348,7 +348,7 @@ def tf_to_pt(layer, dummy_input=None):
# Pass dummy input through to
# get variables under `layer.variables`
if dummy_input is None:
- dummy_input = tf.random.normal([1, 224, 224, layer.input_filters])
+ dummy_input = tf.random.normal([1, 32, 32, layer.input_filters])
layer(dummy_input)
pytorch_mbconv = __MBConvPT(
diff --git a/deepvision/models/classification/efficientnet/efficientnet_weight_mapper.py b/deepvision/models/classification/efficientnet/efficientnet_weight_mapper.py
index 6525364..70c995a 100644
--- a/deepvision/models/classification/efficientnet/efficientnet_weight_mapper.py
+++ b/deepvision/models/classification/efficientnet/efficientnet_weight_mapper.py
@@ -86,113 +86,119 @@ def load_tf_to_pt(
# True
np.allclose(tf_model(dummy_input_tf)['output'].numpy(), pt_model(dummy_input_torch).detach().cpu().numpy())
"""
-
- # Temporarily need to supply this as custom_objects() due to a bug while
- # saving Functional Subclassing models
- model = tf.keras.models.load_model(
- filepath, custom_objects={"EfficientNetV2TF": EfficientNetV2TF}
- )
- model_config = model.get_config()
- target_model = EfficientNetV2PT(
- include_top=model_config["include_top"],
- classes=model_config["classes"],
- input_shape=tf.transpose(tf.squeeze(dummy_input), (2, 0, 1)).shape,
- pooling=model_config["pooling"],
- width_coefficient=model_config["width_coefficient"],
- depth_coefficient=model_config["depth_coefficient"],
- blockwise_kernel_sizes=model_config["blockwise_kernel_sizes"],
- blockwise_num_repeat=model_config["blockwise_num_repeat"],
- blockwise_input_filters=model_config["blockwise_input_filters"],
- blockwise_output_filters=model_config["blockwise_output_filters"],
- blockwise_expand_ratios=model_config["blockwise_expand_ratios"],
- blockwise_se_ratios=model_config["blockwise_se_ratios"],
- blockwise_strides=model_config["blockwise_strides"],
- blockwise_conv_type=model_config["blockwise_conv_type"],
- )
- # Copy stem
- target_model.stem_conv.weight.data = torch.nn.Parameter(
- torch.from_numpy(tf.transpose(model.layers[1].kernel, (3, 2, 0, 1)).numpy())
- )
- # Copy BatchNorm
- target_model.stem_bn.weight.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[2].gamma.numpy())
- )
- target_model.stem_bn.bias.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[2].beta.numpy())
- )
- target_model.stem_bn.running_mean.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[2].moving_mean.numpy())
- )
- target_model.stem_bn.running_var.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[2].moving_variance.numpy())
- )
-
- tf_blocks = [
- block
- for block in model.layers
- if isinstance(block, __FusedMBConvTF) or isinstance(block, __MBConvTF)
- ]
-
- for pt_block, tf_block in zip(target_model.blocks, tf_blocks):
- if isinstance(tf_block, __FusedMBConvTF):
- converted_block = fused_mbconv.tf_to_pt(tf_block)
- pt_block.load_state_dict(converted_block.state_dict())
- if isinstance(tf_block, __MBConvTF):
- converted_block = mbconv.tf_to_pt(tf_block)
- pt_block.load_state_dict(converted_block.state_dict())
-
- target_model.top_conv.weight.data = torch.nn.Parameter(
- torch.from_numpy(
- tf.transpose(
- model.layers[-5 if model_config["include_top"] else -4].kernel,
- (3, 2, 0, 1),
- ).numpy()
+ with torch.no_grad():
+ # Temporarily need to supply this as custom_objects() due to a bug while
+ # saving Functional Subclassing models
+ model = tf.keras.models.load_model(
+ filepath, custom_objects={"EfficientNetV2TF": EfficientNetV2TF}
)
- )
- if model_config["include_top"]:
- # Copy top BatchNorm
- target_model.top_bn.weight.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[-4].gamma.numpy())
+ # Run dummy_input through the model to initialize
+ # model.variables
+ model(dummy_input)
+
+ model_config = model.get_config()
+ target_model = EfficientNetV2PT(
+ include_top=model_config["include_top"],
+ classes=model_config["classes"],
+ input_shape=tf.transpose(tf.squeeze(dummy_input), (2, 0, 1)).shape,
+ pooling=model_config["pooling"],
+ width_coefficient=model_config["width_coefficient"],
+ depth_coefficient=model_config["depth_coefficient"],
+ blockwise_kernel_sizes=model_config["blockwise_kernel_sizes"],
+ blockwise_num_repeat=model_config["blockwise_num_repeat"],
+ blockwise_input_filters=model_config["blockwise_input_filters"],
+ blockwise_output_filters=model_config["blockwise_output_filters"],
+ blockwise_expand_ratios=model_config["blockwise_expand_ratios"],
+ blockwise_se_ratios=model_config["blockwise_se_ratios"],
+ blockwise_strides=model_config["blockwise_strides"],
+ blockwise_conv_type=model_config["blockwise_conv_type"],
)
- target_model.top_bn.bias.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[-4].beta.numpy())
+ # Copy stem
+ target_model.stem_conv.weight.data = torch.nn.Parameter(
+ torch.from_numpy(tf.transpose(model.layers[1].kernel, (3, 2, 0, 1)).numpy())
)
- target_model.top_bn.running_mean.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[-4].moving_mean.numpy())
+ # Copy BatchNorm
+ target_model.stem_bn.weight.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[2].gamma.numpy())
)
- target_model.top_bn.running_var.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[-4].moving_variance.numpy())
+ target_model.stem_bn.bias.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[2].beta.numpy())
)
-
- # Copy head
- target_model.top_dense.weight.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[-1].kernel.numpy().transpose(1, 0))
+ target_model.stem_bn.running_mean.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[2].moving_mean.numpy())
)
- target_model.top_dense.bias.data = torch.nn.Parameter(
- torch.from_numpy(model.layers[-1].bias.numpy())
+ target_model.stem_bn.running_var.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[2].moving_variance.numpy())
)
- """
- As noted in: https://discuss.pytorch.org/t/out-of-memory-error-when-resume-training-even-though-my-gpu-is-empty/30757/5
- Sometimes, on some devices, PyTorch-based networks throw a CUDA OOM when loaded directly on the GPU. To avoid this,
- we now *save* the model and load it back, mapping to the CPU and then pushing back to the original model device.
- """
- device = model.device
- original_filepath = os.path.splitext(filepath)[0]
- torch.save(f"converted_{original_filepath}.pt")
+ tf_blocks = [
+ block
+ for block in model.layers
+ if isinstance(block, __FusedMBConvTF) or isinstance(block, __MBConvTF)
+ ]
+
+ for pt_block, tf_block in zip(target_model.blocks, tf_blocks):
+ if isinstance(tf_block, __FusedMBConvTF):
+ converted_block = fused_mbconv.tf_to_pt(tf_block)
+ pt_block.load_state_dict(converted_block.state_dict())
+ if isinstance(tf_block, __MBConvTF):
+ converted_block = mbconv.tf_to_pt(tf_block)
+ pt_block.load_state_dict(converted_block.state_dict())
+
+ target_model.top_conv.weight.data = torch.nn.Parameter(
+ torch.from_numpy(
+ tf.transpose(
+ model.layers[-5 if model_config["include_top"] else -4].kernel,
+ (3, 2, 0, 1),
+ ).numpy()
+ )
+ )
+ if model_config["include_top"]:
+ # Copy top BatchNorm
+ target_model.top_bn.weight.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[-4].gamma.numpy())
+ )
+ target_model.top_bn.bias.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[-4].beta.numpy())
+ )
+ target_model.top_bn.running_mean.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[-4].moving_mean.numpy())
+ )
+ target_model.top_bn.running_var.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[-4].moving_variance.numpy())
+ )
- target_model.load_state_dict(
- torch.load("converted_{original_filepath}.pt"), map_location="cpu"
- )
- target_model.to(device)
+ # Copy head
+ target_model.top_dense.weight.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[-1].kernel.numpy().transpose(1, 0))
+ )
+ target_model.top_dense.bias.data = torch.nn.Parameter(
+ torch.from_numpy(model.layers[-1].bias.numpy())
+ )
- if freeze_bn:
- # Freeze all BatchNorm2d layers
- for module in target_model.modules():
- if isinstance(module, torch.nn.BatchNorm2d):
- module.eval()
- module.weight.requires_grad = False
- module.bias.requires_grad = False
+ """
+ As noted in: https://discuss.pytorch.org/t/out-of-memory-error-when-resume-training-even-though-my-gpu-is-empty/30757/5
+ Sometimes, on some devices, PyTorch-based networks throw a CUDA OOM when loaded directly on the GPU. To avoid this,
+ we now *save* the model and load it back, mapping to the CPU and then pushing back to the original model device.
+ """
+ device = target_model.device
+ original_filepath = os.path.splitext(filepath)[0]
+ target_model.to("cpu")
+ torch.save(target_model.state_dict(), f"converted_{original_filepath}.pt")
+
+ target_model.load_state_dict(
+ torch.load(f"converted_{original_filepath}.pt", map_location="cpu"),
+ )
+ target_model.to(device)
+ target_model.zero_grad()
+
+ if freeze_bn:
+ # Freeze all BatchNorm2d layers
+ for module in target_model.modules():
+ if isinstance(module, torch.nn.BatchNorm2d):
+ module.eval()
+ module.weight.requires_grad = False
+ module.bias.requires_grad = False
return target_model
@@ -243,101 +249,111 @@ def load_pt_to_tf(
raise ValueError(
f"'architecture' cannot be None, and is required for PyTorch model construction."
)
-
- model = MODEL_ARCHITECTURES.get(architecture)
- model = model(backend="pytorch", **kwargs)
- model.load_state_dict(torch.load(filepath))
-
- model_config = model.get_config()
- target_model = EfficientNetV2TF(
- include_top=model_config["include_top"],
- classes=model_config["classes"],
- input_shape=dummy_input.squeeze(0).permute(1, 2, 0).shape,
- pooling=model_config["pooling"],
- width_coefficient=model_config["width_coefficient"],
- depth_coefficient=model_config["depth_coefficient"],
- blockwise_kernel_sizes=model_config["blockwise_kernel_sizes"],
- blockwise_num_repeat=model_config["blockwise_num_repeat"],
- blockwise_input_filters=model_config["blockwise_input_filters"],
- blockwise_output_filters=model_config["blockwise_output_filters"],
- blockwise_expand_ratios=model_config["blockwise_expand_ratios"],
- blockwise_se_ratios=model_config["blockwise_se_ratios"],
- blockwise_strides=model_config["blockwise_strides"],
- blockwise_conv_type=model_config["blockwise_conv_type"],
- )
-
- # Copy stem
- target_model.layers[1].kernel.assign(
- tf.convert_to_tensor(
- model.stem_conv.weight.data.permute(2, 3, 1, 0).detach().cpu().numpy()
+ with torch.no_grad():
+ model = MODEL_ARCHITECTURES.get(architecture)
+ model = model(backend="pytorch", **kwargs)
+ model.load_state_dict(torch.load(filepath))
+
+ model_config = model.get_config()
+ target_model = EfficientNetV2TF(
+ include_top=model_config["include_top"],
+ classes=model_config["classes"],
+ input_shape=dummy_input.squeeze(0).permute(1, 2, 0).shape,
+ pooling=model_config["pooling"],
+ width_coefficient=model_config["width_coefficient"],
+ depth_coefficient=model_config["depth_coefficient"],
+ blockwise_kernel_sizes=model_config["blockwise_kernel_sizes"],
+ blockwise_num_repeat=model_config["blockwise_num_repeat"],
+ blockwise_input_filters=model_config["blockwise_input_filters"],
+ blockwise_output_filters=model_config["blockwise_output_filters"],
+ blockwise_expand_ratios=model_config["blockwise_expand_ratios"],
+ blockwise_se_ratios=model_config["blockwise_se_ratios"],
+ blockwise_strides=model_config["blockwise_strides"],
+ blockwise_conv_type=model_config["blockwise_conv_type"],
)
- )
-
- # Copy BatchNorm
- target_model.layers[2].gamma.assign(
- tf.convert_to_tensor(model.stem_bn.weight.data.detach().cpu().numpy())
- )
-
- target_model.layers[2].beta.assign(
- tf.convert_to_tensor(model.stem_bn.bias.data.detach().cpu().numpy())
- )
-
- target_model.layers[2].moving_mean.assign(
- tf.convert_to_tensor(model.stem_bn.running_mean.data.detach().cpu().numpy())
- )
-
- target_model.layers[2].moving_variance.assign(
- tf.convert_to_tensor(model.stem_bn.running_var.data.detach().cpu().numpy())
- )
-
- tf_blocks = [
- block
- for block in target_model.layers
- if isinstance(block, __FusedMBConvTF) or isinstance(block, __MBConvTF)
- ]
-
- for tf_block, pt_block in zip(tf_blocks, model.blocks):
- if isinstance(pt_block, __FusedMBConvPT):
- converted_block = fused_mbconv.pt_to_tf(pt_block)
- tf_block.set_weights(converted_block.weights)
- if isinstance(pt_block, __MBConvPT):
- converted_block = mbconv.pt_to_tf(pt_block)
- tf_block.set_weights(converted_block.weights)
-
- target_model.layers[-5 if model_config["include_top"] else -4].kernel.assign(
- tf.convert_to_tensor(
- model.top_conv.weight.data.permute(2, 3, 1, 0).detach().cpu().numpy()
+ dummy_input = tf.convert_to_tensor(
+ dummy_input.permute(0, 2, 3, 1).detach().cpu().numpy()
)
- )
+ # Run dummy_input through the model to initialize
+ # model.variables
+ target_model(dummy_input)
- if model_config["include_top"]:
- # Copy top BatchNorm
- target_model.layers[-4].gamma.assign(
- tf.convert_to_tensor(model.top_bn.weight.data.detach().cpu().numpy())
+ # Copy stem
+ target_model.layers[1].kernel.assign(
+ tf.convert_to_tensor(
+ model.stem_conv.weight.data.permute(2, 3, 1, 0).detach().cpu().numpy()
+ )
+ )
+
+ # Copy BatchNorm
+ target_model.layers[2].gamma.assign(
+ tf.convert_to_tensor(model.stem_bn.weight.data.detach().cpu().numpy())
)
- target_model.layers[-4].beta.assign(
- tf.convert_to_tensor(model.top_bn.bias.data.detach().cpu().numpy())
+ target_model.layers[2].beta.assign(
+ tf.convert_to_tensor(model.stem_bn.bias.data.detach().cpu().numpy())
)
- target_model.layers[-4].moving_mean.assign(
- tf.convert_to_tensor(model.top_bn.running_mean.data.detach().cpu().numpy())
+ target_model.layers[2].moving_mean.assign(
+ tf.convert_to_tensor(model.stem_bn.running_mean.data.detach().cpu().numpy())
)
- target_model.layers[-4].moving_variance.assign(
- tf.convert_to_tensor(model.top_bn.running_var.data.detach().cpu().numpy())
+ target_model.layers[2].moving_variance.assign(
+ tf.convert_to_tensor(model.stem_bn.running_var.data.detach().cpu().numpy())
)
- # Copy head
- target_model.layers[-1].kernel.assign(
+ tf_blocks = [
+ block
+ for block in target_model.layers
+ if isinstance(block, __FusedMBConvTF) or isinstance(block, __MBConvTF)
+ ]
+
+ for tf_block, pt_block in zip(tf_blocks, model.blocks):
+ if isinstance(pt_block, __FusedMBConvPT):
+ converted_block = fused_mbconv.pt_to_tf(pt_block)
+ tf_block.set_weights(converted_block.weights)
+ if isinstance(pt_block, __MBConvPT):
+ converted_block = mbconv.pt_to_tf(pt_block)
+ tf_block.set_weights(converted_block.weights)
+
+ target_model.layers[-5 if model_config["include_top"] else -4].kernel.assign(
tf.convert_to_tensor(
- model.top_dense.weight.data.permute(1, 0).detach().cpu().numpy()
+ model.top_conv.weight.data.permute(2, 3, 1, 0).detach().cpu().numpy()
)
)
- target_model.layers[-1].bias.assign(
- tf.convert_to_tensor(model.top_dense.bias.data.detach().cpu().numpy())
- )
+ if model_config["include_top"]:
+ # Copy top BatchNorm
+ target_model.layers[-4].gamma.assign(
+ tf.convert_to_tensor(model.top_bn.weight.data.detach().cpu().numpy())
+ )
+
+ target_model.layers[-4].beta.assign(
+ tf.convert_to_tensor(model.top_bn.bias.data.detach().cpu().numpy())
+ )
+
+ target_model.layers[-4].moving_mean.assign(
+ tf.convert_to_tensor(
+ model.top_bn.running_mean.data.detach().cpu().numpy()
+ )
+ )
+
+ target_model.layers[-4].moving_variance.assign(
+ tf.convert_to_tensor(
+ model.top_bn.running_var.data.detach().cpu().numpy()
+ )
+ )
+
+ # Copy head
+ target_model.layers[-1].kernel.assign(
+ tf.convert_to_tensor(
+ model.top_dense.weight.data.permute(1, 0).detach().cpu().numpy()
+ )
+ )
+
+ target_model.layers[-1].bias.assign(
+ tf.convert_to_tensor(model.top_dense.bias.data.detach().cpu().numpy())
+ )
if freeze_bn:
# Freeze all BatchNorm2d layers
diff --git a/deepvision/models/classification/efficientnet/efficientnetv2_pt.py b/deepvision/models/classification/efficientnet/efficientnetv2_pt.py
index ef947e8..5e66730 100644
--- a/deepvision/models/classification/efficientnet/efficientnetv2_pt.py
+++ b/deepvision/models/classification/efficientnet/efficientnetv2_pt.py
@@ -234,7 +234,7 @@ def training_step(self, train_batch, batch_idx):
loss = self.compute_loss(outputs, targets)
self.log(
"loss",
- loss,
+ loss.item(),
on_step=True,
on_epoch=True,
prog_bar=True,
@@ -256,7 +256,7 @@ def validation_step(self, val_batch, batch_idx):
loss = self.compute_loss(outputs, targets)
self.log(
"val_loss",
- loss,
+ loss.item(),
on_step=True,
on_epoch=True,
prog_bar=True,
diff --git a/setup.py b/setup.py
index fbf7409..438a23d 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
setup(
name="deepvision-toolkit",
- version="0.1.2",
+ version="0.1.3",
description="PyTorch and TensorFlow/Keras image models with automatic weight conversions and equal API/implementations - Vision Transformer (ViT), ResNetV2, EfficientNetV2, (planned...) DeepLabV3+, ConvNeXtV2, YOLO, NeRF, etc.",
url="https://github.com/DavidLandup0/deepvision",
author="David Landup",
| Google Colab CUDA Issue
When porting weights, Google Colab's T4 runs out of memory, presumably due to the parameter porting?
Doesn't happen locally on a 6GB GPU (1660Super) but does on a 16GB GPU (T4).
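The fix that landed (see the patch above) wraps the weight copy in `torch.no_grad()` and round-trips the converted model through the CPU before moving it back to its device; a stripped-down sketch of that pattern (the `copy_fn` callback and the output path are placeholders, not deepvision API):

```python
import torch

def port_weights_safely(target_model: torch.nn.Module, copy_fn, out_path: str = "converted.pt"):
    """Copy weights without autograd bookkeeping, then reload via CPU to dodge CUDA OOM."""
    with torch.no_grad():              # no graph is built while tensors are assigned
        copy_fn(target_model)          # whatever assigns the converted TF weights

    device = next(target_model.parameters()).device
    target_model.to("cpu")                              # serialize from host memory
    torch.save(target_model.state_dict(), out_path)

    state = torch.load(out_path, map_location="cpu")    # always deserialize onto CPU
    target_model.load_state_dict(state)
    target_model.to(device)                             # then move back to the original device
    target_model.zero_grad()
    return target_model
```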
| 2023-02-13T13:28:57 | 0.0 | [] | [] |
|||
saeeddhqan/Maryam | saeeddhqan__Maryam-230 | dcf09d198b6344f5c95b7b28b4f2c95373d38523 | diff --git a/core/util/pastebin.py b/core/util/pastebin.py
index 9e0dfaf6b..98089729f 100644
--- a/core/util/pastebin.py
+++ b/core/util/pastebin.py
@@ -37,8 +37,7 @@ def __init__(self, q, limit=1, count=10):
def search(self, self2, name, q, q_formats, limit, count):
engine = getattr(self.framework, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in self.q_formats else self.q_formats['default_q']
+ q = self.q_formats[f"{name}_q"] if f"{name}_q" in self.q_formats else self.q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
diff --git a/core/util/yippy.py b/core/util/yippy.py
deleted file mode 100644
index 9746e2bce..000000000
--- a/core/util/yippy.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-OWASP Maryam!
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import re
-
-class main:
-
- def __init__(self, q, limit=2, count=100):
- """ yippy.com search engine
-
- q : Query for search
- """
- self.framework = main.framework
- self.q = q
- self.limit = limit
- self.count = count
- self._pages = ''
- self._links = []
- self.yippy = 'yippy.com'
-
- def run_crawl(self):
- reg = r"<a href=\"(https://?[^\"]+)\" class=\"title\""
- geturl = f"http://{self.yippy}/search?query={self.q}"
- self.framework.verbose('[YIPPY] Searching in the yippy.com...', end='\r')
- try:
- req = self.framework.request(url=geturl)
- except:
- self.framework.error('ConnectionError', 'util/yippy', 'run_crawl')
- self.framework.error('Yippy is missed!', 'util/yippy', 'run_crawl')
- return
-
- txt = req.text
- self._links = [x.replace('<a href="', '') for x in re.findall(reg, txt)]
- self._pages = txt
- root = re.search(r'(root-[\d]+-[\d]+%7C[\d]+)">next</', txt)
- if root:
- root = root.group()
- file = re.search(r'%3afile=([A-z0-9_\-\.]+)&', txt)
- if not file:
- self.framework.error('Yippy is missed!')
- return
- file = file.group()
- root = re.sub(r'[\d]+-[\d]+', '0-8000', root)
- newurl = f"https://yippy.com/ysa/cgi-bin/query-meta?v{file};v:state=root|" + root.replace('">next</', '')
- try:
- req = self.framework.request(url=newurl)
- except:
- self.framework.error('[YIPPY] ConnectionError')
- self.framework.error('Yippy is missed!')
- return
- self._pages += req.text
- self._links.extend([x.replace('<a href="', '').replace(' class="title"', '') for x in re.findall(reg, self._pages)])
-
- def crawl_with_response_filter(self, policy):
- policies = {'webpages': 'https://www.yippy.com/oauth/bing_yxml_api.php',
- 'images': 'https://www.yippy.com/oauth/bing_yxml_api_images.php',
- 'news': 'https://www.yippy.com/oauth/bing_yxml_api_news.php',
- 'video': 'https://www.yippy.com/oauth/bing_yxml_api_video.php'}
- policy = policy.lower()
- if policy not in policies:
- baseurl = policies['webpages']
- else:
- baseurl = policies[policy]
-
- url = f"{baseurl}?safeSearch=off&textDecorations=true&count=1000&offset=0&mkt=en-US&textFormat=HTML&q={self.q}"
- req = self.framework.request(url=url)
- if req.status_code == 429:
- self.framework.error('Rate limit is exceeded. Try again in 26 seconds.')
- return
- self._pages += req.text
- links = re.findall(r"<!\[CDATA\[([^\]]+)\]\]>", req.text)
- self._links.extend(links)
-
- @property
- def pages(self):
- return self._pages
-
- @property
- def links(self):
- return self._links
-
- @property
- def dns(self):
- return self.framework.page_parse(self._pages).get_dns(self.q)
-
- @property
- def emails(self):
- return self.framework.page_parse(self._pages).get_emails(self.q)
-
- @property
- def docs(self):
- return self.framework.page_parse(self._pages).get_docs(self.q, self.links)
diff --git a/modules/osint/cloud_storage.py b/modules/osint/cloud_storage.py
index 6dbd7f674..15ad2b569 100644
--- a/modules/osint/cloud_storage.py
+++ b/modules/osint/cloud_storage.py
@@ -21,7 +21,7 @@
'version': '0.1',
'description': 'Search for the query in online storage services like GoogleDrive, OneDrive, Dropbox,\
Amazon S3, Box and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -38,8 +38,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -64,7 +63,6 @@ def module_api(self):
for site_url in sites:
q_formats = {
'default_q': f'site:{site_url} {query}',
- 'yippy_q': f'"{site_url}" {query}',
'millionshort_q': f'site:{site_url} "{query}"',
'qwant_q': f'site:{site_url} {query}'
}
diff --git a/modules/osint/dns_search.py b/modules/osint/dns_search.py
index 186eea2ac..b1551e2b6 100644
--- a/modules/osint/dns_search.py
+++ b/modules/osint/dns_search.py
@@ -22,7 +22,7 @@
'version': '2.5',
'description': 'Search in the open-sources to find subdomans.',
'sources': ('securitytrails', 'bing', 'google', 'yahoo', 'yandex', 'metacrawler', 'ask', 'baidu', 'startpage',
- 'netcraft', 'threatcrowd', 'virustotal', 'yippy', 'otx', 'carrot2', 'crt',
+ 'netcraft', 'threatcrowd', 'virustotal', 'otx', 'carrot2', 'crt',
'qwant', 'millionshort', 'threatminer', 'jldc', 'bufferover', 'rapiddns', 'certspotter',
'sublist3r', 'riddler', 'sitedossier', 'duckduckgo', 'dnsdumpster', 'yougetsignal', 'pastebin',
'urlscan', 'gigablast', 'dogpile'),
diff --git a/modules/osint/email_search.py b/modules/osint/email_search.py
index 44b45450d..a4328faf1 100644
--- a/modules/osint/email_search.py
+++ b/modules/osint/email_search.py
@@ -18,7 +18,7 @@
'version': '1.0',
'description': 'Search in open-sources to find emails.',
'sources': ('bing', 'pastebin', 'google', 'yahoo', 'yandex', 'metacrawler',
- 'ask', 'baidu', 'startpage', 'yippy', 'qwant', 'duckduckgo', 'hunter', 'gigablast', 'github'),
+ 'ask', 'baidu', 'startpage', 'qwant', 'duckduckgo', 'hunter', 'gigablast', 'github'),
'options': (
('query', None, True, 'Domain name or company name', '-q', 'store', str),
('limit', 3, False, 'Search limit(number of pages, default=3)', '-l', 'store', int),
@@ -37,7 +37,6 @@ def search(self, name, q, q_formats, limit, count):
global EMAILS
engine = getattr(self, name)
eng = name
- name = engine.__init__.__name__
varnames = engine.__init__.__code__.co_varnames
q = q_formats[f"{eng}"] if f"{eng}" in q_formats else q_formats['default']
if 'limit' in varnames and 'count' in varnames:
diff --git a/modules/osint/github_leaks.py b/modules/osint/github_leaks.py
index c36ba8932..eac27e9c8 100644
--- a/modules/osint/github_leaks.py
+++ b/modules/osint/github_leaks.py
@@ -22,7 +22,7 @@
'author': 'Aman Singh',
'version': '0.1',
'description': 'Search your query in the GitHub and show the potentially leaked info.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo', 'github'),
+ 'sources': ('google', 'carrot2', 'bing','yahoo', 'millionshort', 'qwant', 'duckduckgo', 'github'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -79,7 +79,6 @@ def search(self, name, q, q_formats, limit, count):
engine = getattr(self, name)
eng = name
q = q_formats[f"{name}"] if f"{name}" in q_formats else q_formats['default']
- name = engine.__init__.__name__
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -105,7 +104,6 @@ def module_api(self):
engines = self.options['engine'].split(',')
q_formats = {
'default': f"site:github.com {query}",
- 'yippy': f'"github.com" {query}',
'millionshort': f'site:github.com "{query}"',
'qwant': f'site:github.com {query}',
'github': f'{query}'
diff --git a/modules/osint/social_nets.py b/modules/osint/social_nets.py
index 5865399c2..059f9b559 100644
--- a/modules/osint/social_nets.py
+++ b/modules/osint/social_nets.py
@@ -34,7 +34,6 @@ def search(self, name, q, q_formats, limit, count):
global PAGES
engine = getattr(self, name)
eng = name
- name = engine.__init__.__name__
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
diff --git a/modules/search/discord.py b/modules/search/discord.py
index a7bd868e1..5a8c794a8 100644
--- a/modules/search/discord.py
+++ b/modules/search/discord.py
@@ -38,7 +38,6 @@
def search(self, name, q, q_formats, limit, count):
global PAGES, LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames:
attr = engine(q, limit)
diff --git a/modules/search/facebook.py b/modules/search/facebook.py
index ccb53ea89..4722301a6 100644
--- a/modules/search/facebook.py
+++ b/modules/search/facebook.py
@@ -22,7 +22,7 @@
'author': 'Saeed',
'version': '0.1',
'description': 'Search your query in the facebook.com and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -39,8 +39,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -61,7 +60,6 @@ def module_api(self):
output = {'links': [], 'people': [], 'groups': [], 'hashtags': []}
q_formats = {
'default_q': f"site:facebook.com {query}",
- 'yippy_q': f'"facebook.com" {query}',
'millionshort_q': f'site:facebook.com "{query}"',
'qwant_q': f'site:facebook.com {query}'
}
diff --git a/modules/search/github.py b/modules/search/github.py
index ad8c52e07..fa061a0e1 100644
--- a/modules/search/github.py
+++ b/modules/search/github.py
@@ -19,7 +19,7 @@
'author': 'Aman Singh',
'version': '1.0',
'description': 'Search your query in the GitHub and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo', 'github'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo', 'github'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -41,7 +41,6 @@ def search(self, name, q, q_formats, limit, count):
engine = getattr(self, name)
eng = name
q = q_formats[f"{name}"] if f"{name}" in q_formats else q_formats['default']
- name = engine.__init__.__name__
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -68,7 +67,6 @@ def module_api(self):
output = {'repositories': [], 'blogs': [], 'usernames': [], 'emails': set()}
q_formats = {
'default': f"site:github.com {query}",
- 'yippy': f'"github.com" {query}',
'millionshort': f'site:github.com "{query}"',
'qwant': f'site:github.com {query}',
'github': f'{query}'
diff --git a/modules/search/instagram.py b/modules/search/instagram.py
index 66fedea8a..05c8352c6 100644
--- a/modules/search/instagram.py
+++ b/modules/search/instagram.py
@@ -22,7 +22,7 @@
'author': 'Aman Singh',
'version': '0.7',
'description': 'Search your query in the Instagram and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo', 'instagram'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo', 'instagram'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -46,8 +46,7 @@ def search(self, name, q, q_formats, limit, count, session_id):
eng = name
query = q
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
# for instagram
@@ -94,7 +93,6 @@ def module_api(self):
q_formats = {
'default_q': f"site:www.instagram.com {query}",
'google_q': f"site:www.instagram.com inurl:{query}",
- 'yippy_q': f"www.instagram.com {query}",
'instagram': f"{query}"
}
diff --git a/modules/search/linkedin.py b/modules/search/linkedin.py
index 77155f96b..4e56c7201 100644
--- a/modules/search/linkedin.py
+++ b/modules/search/linkedin.py
@@ -20,7 +20,7 @@
'author': 'Saeed',
'version': '0.5',
'description': 'Search your query in the linkedin.com and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -38,8 +38,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -60,7 +59,6 @@ def module_api(self):
output = {'links': [], 'usernames': [], 'blogs': []}
q_formats = {
'default_q': f"site:linkedin.com {query}",
- 'yippy_q': f'"linkedin.com" {query}',
'millionshort_q': f'site:linkedin.com "{query}"',
'qwant_q': f'site:linkedin.com {query}'
}
diff --git a/modules/search/pastebin.py b/modules/search/pastebin.py
index 15c37ae0e..66d72c1b2 100644
--- a/modules/search/pastebin.py
+++ b/modules/search/pastebin.py
@@ -40,8 +40,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
diff --git a/modules/search/quora.py b/modules/search/quora.py
index 8d685f3a3..fba967e3a 100644
--- a/modules/search/quora.py
+++ b/modules/search/quora.py
@@ -23,7 +23,7 @@
'author': 'Aman Rawat',
'version': '0.2',
'description': 'Search your query in the quora.com and show the results.',
- 'sources': ('google', 'yahoo', 'bing', 'yippy', 'metacrawler', 'millionshort', 'carrot2', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'yahoo', 'bing', 'metacrawler', 'millionshort', 'carrot2', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -39,8 +39,7 @@
def search(self, name, q, q_formats, limit, count):
global LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -62,7 +61,6 @@ def module_api(self):
output = {'links': [], 'usernames': []}
q_formats = {
'default_q': f"site:www.quora.com {query}",
- 'yippy_q': f'"www.quora.com" {query}',
'millionshort_q': f'site:www.quora.com "{query}"',
'qwant_q': f'site:www.quora.com {query}'
}
diff --git a/modules/search/reddit.py b/modules/search/reddit.py
index 3673c2226..cfbfd4fce 100644
--- a/modules/search/reddit.py
+++ b/modules/search/reddit.py
@@ -22,7 +22,7 @@
'author': 'Kunal Khandelwal',
'version': '0.5',
'description': 'Search your query in the Reddit and show the results.',
- 'sources': ('google', 'yahoo', 'bing', 'yippy', 'metacrawler', 'millionshort', 'carrot2', 'qwant'),
+ 'sources': ('google', 'yahoo', 'bing', 'duckduckgo', 'metacrawler', 'millionshort', 'carrot2', 'qwant'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -39,8 +39,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -61,7 +60,6 @@ def module_api(self):
output = {'links': [], 'usernames': []}
q_formats = {
'default_q': f"site:www.reddit.com {query}",
- 'yippy_q': f'"www.reddit.com" {query}',
'millionshort_q': f'site:www.reddit.com "{query}"',
'qwant_q': f'site:www.reddit.com {query}'
}
diff --git a/modules/search/spotify.py b/modules/search/spotify.py
index 62cc2fd96..2fc706b34 100644
--- a/modules/search/spotify.py
+++ b/modules/search/spotify.py
@@ -20,7 +20,7 @@
'author': 'Kunal Khandelwal',
'version': '0.1',
'description': 'Search artists, albums, playlist and users on spotify',
- 'sources': ('google', 'yahoo', 'bing', 'yippy', 'metacrawler', 'millionshort', 'carrot2', 'qwant'),
+ 'sources': ('google', 'yahoo', 'bing','duckduckgo', 'metacrawler', 'millionshort', 'carrot2', 'qwant'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -36,8 +36,7 @@
def search(self, name, q, q_formats, limit, count):
global LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -58,7 +57,6 @@ def module_api(self):
output = {'user': [], 'artist': [], 'playlist': [], 'album': []}
q_formats = {
'default_q': f"site:open.spotify.com {query}",
- 'yippy_q': f'"open.spotify.com" {query}',
'millionshort_q': f'site:open.spotify.com "{query}"',
'qwant_q': f'site:open.spotify.com {query}'
}
diff --git a/modules/search/stackoverflow.py b/modules/search/stackoverflow.py
index f5189efbf..ea3ffc709 100644
--- a/modules/search/stackoverflow.py
+++ b/modules/search/stackoverflow.py
@@ -23,7 +23,7 @@
'author': 'Sanjiban Sengupta',
'version': '0.5',
'description': 'Search your query in the stackoverflow.com and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -40,8 +40,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
diff --git a/modules/search/telegram.py b/modules/search/telegram.py
index 0478e64e6..e6ce40ac9 100644
--- a/modules/search/telegram.py
+++ b/modules/search/telegram.py
@@ -21,7 +21,7 @@
'author': 'Vikas Kundu',
'version': '0.1',
'description': 'Search the publicly listed telegram groups for juicy info like emails, phone numbers etc',
- 'sources': ('telegramchannels.me','google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('telegramchannels.me','google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -39,8 +39,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -88,7 +87,6 @@ def module_api(self):
output = {'group-links': [], 'handles': [], 'phone-numbers': []}
q_formats = {
'default_q': f"site:t.me/joinchat {query}",
- 'yippy_q': f'"t.me/joinchat" {query}',
'millionshort_q': f'site:t.me/joinchat "{query}"',
'qwant_q': f'site:t.me/joinchat {query}'
}
diff --git a/modules/search/tiktok.py b/modules/search/tiktok.py
index 1ff937892..d1520f7d1 100644
--- a/modules/search/tiktok.py
+++ b/modules/search/tiktok.py
@@ -23,7 +23,7 @@
'author': 'Prakhar Jain',
'version': '0.1',
'description': 'Search your query on TikTok and show the results.',
- 'sources': ('google', 'yahoo', 'bing', 'yippy', 'metacrawler', 'millionshort', 'carrot2', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'yahoo', 'bing', 'metacrawler', 'millionshort', 'carrot2', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -39,8 +39,7 @@
def search(self, name, q, q_formats, limit, count):
global LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -60,7 +59,6 @@ def module_api(self):
output = {'videos': [], 'usernames': [], 'tags': [], 'music': []}
q_formats = {
'default_q': f"site:www.tiktok.com {query}",
- 'yippy_q': f'"www.tiktok.com" {query}',
'millionshort_q': f'site:www.tiktok.com "{query}"',
'qwant_q': f'site:www.tiktok.com {query}'
}
diff --git a/modules/search/trello.py b/modules/search/trello.py
index a66ffadf0..fc3af60f2 100644
--- a/modules/search/trello.py
+++ b/modules/search/trello.py
@@ -21,7 +21,7 @@
'author': 'Vikas Kundu',
'version': '0.5',
'description': 'Search public trello boards for data and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -39,8 +39,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -61,7 +60,6 @@ def module_api(self):
output = {'links': [], 'usernames': [], 'handles': []}
q_formats = {
'default_q': f"site:trello.com {query}",
- 'yippy_q': f'"trello.com" {query}',
'millionshort_q': f'site:trello.com "{query}"',
'qwant_q': f'site:trello.com {query}'
}
diff --git a/modules/search/twitter.py b/modules/search/twitter.py
index b76f6db2a..03dc1011b 100644
--- a/modules/search/twitter.py
+++ b/modules/search/twitter.py
@@ -22,7 +22,7 @@
'author': 'Saeed',
'version': '0.5',
'description': 'Search your query in the twitter.com and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
@@ -40,8 +40,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -62,8 +61,7 @@ def module_api(self):
output = {'links': [], 'people': [], 'hashtags': []}
q_formats = {
'default_q': f"site:twitter.com {query}",
- 'millionshort_q': f'site:twitter.com "{query}"',
- 'yippy_q': f"twitter.com {query}"
+ 'millionshort_q': f'site:twitter.com "{query}"'
}
self.thread(search, self.options['thread'], engines, query, q_formats, limit, count, meta['sources'])
diff --git a/modules/search/yippy.py b/modules/search/yippy.py
deleted file mode 100644
index fdad6fa41..000000000
--- a/modules/search/yippy.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-OWASP Maryam!
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-
-meta = {
- 'name': 'Yippy Search',
- 'author': 'Saeed',
- 'version': '0.2',
- 'description': 'Search your query in the yippy.com and show the results.',
- 'sources': ('yippy',),
- 'options': (
- ('query', None, True, 'Query string', '-q', 'store', str),
- ('method', 'webpages', False, \
- 'Yippy methods("webpages", "images", "news", "video"). default=None', '-m', 'store', str),
- ),
- 'examples': ('yippy -q <QUERY>', 'yippy -q <QUERY> -m images')
-}
-
-def module_api(self):
- query = self.options['query']
- method = self.options['method'].lower()
- run = self.yippy(query)
- if method:
- if method == 'webpages':
- run.crawl_with_response_filter(method)
- run.run_crawl()
- else:
- if method in ('images', 'news', 'video'):
- run.crawl_with_response_filter(method)
- else:
- run.run_crawl()
- links = run.links
- self.save_gather({'links': links}, 'search/yippy', query, output=self.options['output'])
- return links
-
-def module_run(self):
- self.alert_results(module_api(self))
\ No newline at end of file
diff --git a/modules/search/youtube.py b/modules/search/youtube.py
index ebab4e7c6..7b001d37a 100644
--- a/modules/search/youtube.py
+++ b/modules/search/youtube.py
@@ -21,13 +21,13 @@
'author': 'Aman Rawat',
'version': '0.5',
'description': 'Search your query in the youtube.com and show the results.',
- 'sources': ('google', 'carrot2', 'bing', 'yippy', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
+ 'sources': ('google', 'carrot2', 'bing', 'yahoo', 'millionshort', 'qwant', 'duckduckgo'),
'options': (
('query', None, True, 'Query string', '-q', 'store', str),
('limit', 1, False, 'Search limit(number of pages, default=1)', '-l', 'store', int),
('count', 50, False, 'Number of results per page(min=10, max=100, default=50)', '-c', 'store', int),
('thread', 2, False, 'The number of engine that run per round(default=2)', '-t', 'store', int),
- ('engine', 'google,yippy', False, 'Engine names for search(default=google)', '-e', 'store', str),
+ ('engine', 'google', False, 'Engine names for search(default=google)', '-e', 'store', str),
),
'examples': ('youtube -q <QUERY> -l 15 --output',)
}
@@ -38,8 +38,7 @@
def search(self, name, q, q_formats, limit, count):
global PAGES,LINKS
engine = getattr(self, name)
- name = engine.__init__.__name__
- q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
+ q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats['default_q']
varnames = engine.__init__.__code__.co_varnames
if 'limit' in varnames and 'count' in varnames:
attr = engine(q, limit, count)
@@ -65,7 +64,6 @@ def module_api(self):
q_formats = {
'ch_q': f"site:youtube.com inurl:/c/ OR inurl:/user/ {query}",
'default_q': f"site:youtube.com {query}",
- 'yippy_q': f"www.youtube.com {query}",
'qwant_q': f"site:www.youtube.com {query}",
'millionshort_q': f'site:www.youtube.com "{query}"',
}
| [Bug] in Reddit Module and a few other Modules
As pointed out by @vikas-kundu, there are some errors in modules/search/reddit.py:
42. name = engine.__init__.__name__
43. q = f"{name}_q" if f"{name}_q" in q_formats else q_formats['default_q']
Line 42 causes `name` to be set to `__init__`, so `q` is always set to `q_formats['default_q']` regardless of the search engine.
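A standalone snippet (not Maryam code) that shows both the wrong lookup and the intended one:

```python
class Engine:                      # stands in for any search-engine util class
    def __init__(self, q):
        self.q = q

q_formats = {
    "default_q": "site:www.reddit.com microsoft",
    "reddit_q": '"www.reddit.com" microsoft',
}

# Buggy: __init__.__name__ is always the literal string "__init__", so
# "__init___q" is never a key and the default query always wins.
name = Engine.__init__.__name__
print(name)                                                # __init__
print(q_formats.get(f"{name}_q", q_formats["default_q"]))  # site:www.reddit.com microsoft

# Intended: keep the engine's own name and look up the *value*, not the key.
name = "reddit"
q = q_formats[f"{name}_q"] if f"{name}_q" in q_formats else q_formats["default_q"]
print(q)                                                   # "www.reddit.com" microsoft
```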
In picture 1, `q` is set to `site:www.reddit.com microsoft` instead of `"www.reddit.com" microsoft`.
Pic 1 is without the changes and pic 2 is after the changes:


| Similar kinds of bugs are found in the LinkedIn and other modules. I will find them and resolve them ASAP.
This is a module error, so just install the modules:

- `python3 -m pip install -r requirements`
## I didn't get any error

Try to make your branch even with saeeddhqan:master.
https://docs.github.com/en/github/getting-started-with-github/getting-changes-from-a-remote-repository
@rishabhjainfinal You didn't get any results either; the bug is about exactly that.
> Similar kinds of bugs are found in the LinkedIn and other modules. I will find them and resolve them ASAP.
The LinkedIn module was working a few days ago. Maybe some recent commits broke it?
>
>
> > Similar kinds of bugs are found in the LinkedIn and other modules. I will find them and resolve them ASAP.
>
> The LinkedIn module was working a few days ago. Maybe some recent commits broke it?
LinkedIn and most such modules are fine in general; just a few search engine options in them are buggy. | 2021-05-10T07:58:32 | 0.0 | [] | [] |
||
SoftwareUnderstanding/inspect4py | SoftwareUnderstanding__inspect4py-323 | 0da6e75ef46193538ecd9acc587c871503d21ade | diff --git a/code_inspector/utils.py b/code_inspector/utils.py
index eb95039..7dbc542 100644
--- a/code_inspector/utils.py
+++ b/code_inspector/utils.py
@@ -156,33 +156,34 @@ def extract_software_invocation(dir_info, dir_tree_info, input_path, call_list,
body_only_files = []
flag_service_main = 0
for key in dir_info: # filter (lambda key: key not in "directory_tree", dir_info):
- for elem in dir_info[key]:
- if elem["main_info"]["main_flag"]:
- flag_service_main = 0
- flag_service = 0
- main_stored = 0
- if elem["is_test"]:
- test_files_main.append(elem["file"]["path"])
- main_stored = 1
- else:
- try:
- # 2. Exploration for services in files with "mains"
- flag_service, software_invocation_info = service_check(elem, software_invocation_info,
+ if key!="requirements":
+ for elem in dir_info[key]:
+ if elem["main_info"]["main_flag"]:
+ flag_service_main = 0
+ flag_service = 0
+ main_stored = 0
+ if elem["is_test"]:
+ test_files_main.append(elem["file"]["path"])
+ main_stored = 1
+ else:
+ try:
+ # 2. Exploration for services in files with "mains"
+ flag_service, software_invocation_info = service_check(elem, software_invocation_info,
server_dependencies, "main", readme)
- except:
- main_files.append(elem["file"]["path"])
+ except:
+ main_files.append(elem["file"]["path"])
- if flag_service:
- flag_service_main = 1
+ if flag_service:
+ flag_service_main = 1
- if not flag_service and not main_stored:
- main_files.append(elem["file"]["path"])
+ if not flag_service and not main_stored:
+ main_files.append(elem["file"]["path"])
- elif elem["is_test"]:
- test_files_no_main.append(elem["file"]["path"])
- # Filtering scripts with just body in software invocation
- elif elem['body']['calls']:
- body_only_files.append(elem)
+ elif elem["is_test"]:
+ test_files_no_main.append(elem["file"]["path"])
+ # Filtering scripts with just body in software invocation
+ elif elem['body']['calls']:
+ body_only_files.append(elem)
m_secondary = [0] * len(main_files)
flag_script_main = 0
| Error when extracting requirements
If I do:
```
code_inspector -i ../test_repos/somef/src -o test_dir -r
```
then I get some results (although incomplete). But if I do
```
code_inspector -i ../test_repos/somef/src -o test_dir -r -si
```
I get the following error:
```
Traceback (most recent call last):
File "/home/dgarijo/Documents/GitHub/code_inspector/env_3.9/bin/code_inspector", line 33, in <module>
sys.exit(load_entry_point('code-inspector', 'console_scripts', 'code_inspector')())
File "/home/dgarijo/Documents/GitHub/code_inspector/env_3.9/lib/python3.9/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/home/dgarijo/Documents/GitHub/code_inspector/env_3.9/lib/python3.9/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/home/dgarijo/Documents/GitHub/code_inspector/env_3.9/lib/python3.9/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/dgarijo/Documents/GitHub/code_inspector/env_3.9/lib/python3.9/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/home/dgarijo/Documents/GitHub/code_inspector/code_inspector/cli.py", line 1276, in main
all_soft_invocation_info_list = extract_software_invocation(dir_info, directory_tree_info, input_path,
File "/home/dgarijo/Documents/GitHub/code_inspector/code_inspector/utils.py", line 160, in extract_software_invocation
if elem["main_info"]["main_flag"]:
TypeError: string indices must be integers
```
| 2022-01-24T12:04:41 | 0.0 | [] | [] |
|||
anarchy-ai/LLM-VM | anarchy-ai__LLM-VM-316 | e704517bfe082b07499400a567b938dd1298db8c | diff --git a/src/llm_vm/onsite_llm.py b/src/llm_vm/onsite_llm.py
index f134c769..73d1cfef 100644
--- a/src/llm_vm/onsite_llm.py
+++ b/src/llm_vm/onsite_llm.py
@@ -15,13 +15,16 @@
LlamaTokenizer,
DataCollatorForLanguageModeling,
TrainingArguments,
- Trainer)
+ Trainer,
+ BitsAndBytesConfig)
import time
from datetime import datetime
import tempfile
import json
import os
import torch
+from peft import get_peft_model, LoraConfig, prepare_model_for_kbit_training
+from trl import SFTTrainer
__private_key_value_models_map = {}
@@ -199,9 +202,153 @@ def asynctune():
return asynctune
+ def lora_finetune(self, data, optimizer, c_id, model_filename=None):
+ def async_lora():
+ old_model = optimizer.storage.get_model(c_id)
+ if old_model is not None:
+ self.model.load_state_dict(torch.load(old_model))
+ untokenized_final_dataset = []
+ for prompt,response in data:
+ untokenized_final_dataset.append(prompt + response)
+ tokenized_final_dataset = map(self.tokenizer,untokenized_final_dataset)
+ self.tokenizer.pad_token = self.tokenizer.eos_token
+ data_collator = DataCollatorForLanguageModeling(tokenizer=self.tokenizer, mlm=False)
+ optimizer.storage.set_training_in_progress(c_id, True)
+ training_args = TrainingArguments(
+ output_dir=os.path.join(model_path_default,"finetuned_models",),
+ evaluation_strategy="epoch",
+ learning_rate=2e-5,
+ per_device_train_batch_size = 1,
+ per_device_eval_batch_size = 1,
+ num_train_epochs=5,
+ weight_decay=0.01,
+ report_to= "none",
+ )
+ test_set = FinetuningDataset(tokenized_final_dataset,len(untokenized_final_dataset))
+
+ peft_config = LoraConfig(
+ r=16,
+ lora_alpha=32,
+ lora_dropout=0.05,
+ bias="none",
+ task_type="CAUSAL_LM",
+ )
+
+ trainer = SFTTrainer(
+ self.model,
+ args=training_args,
+ train_dataset=test_set,
+ eval_dataset=test_set,
+ data_collator=data_collator,
+ peft_config=peft_config
+ )
+ os.makedirs(os.path.join(model_path_default,"finetuned_models", self.model_name), exist_ok=True)
+ if tokenized_final_dataset:
+ trainer.train()
+ eval_results = trainer.evaluate()
+ optimizer.storage.set_training_in_progress(c_id, False)
+
+ if os.name == "nt":
+ timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
+ else:
+ timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
+ new_model = os.path.join(model_path_default,"finetuned_models",self.model_name, timestamp + '_' + self.model_name + ".pt" ) if model_filename is None else os.path.join(model_path_default,"finetuned_models",model_filename)
+ open(new_model,"a")
+ torch.save(self.model.state_dict(), new_model) # the model in memory is different now
+ self.model_name = self.model_name + "_ft_"+ timestamp
+ optimizer.storage.set_model(c_id, new_model)
+ return math.exp(eval_results['eval_loss']) #perplexity is the metric we use for finetuning measurement
+ return async_lora
+
+ def quantize_model(self, bits=4):
+ if self.model.is_quantizable():
+ q4_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=torch.bfloat16)
+ q8_config = BitsAndBytesConfig(load_in_8bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
+
+ if bits == 4:
+ q_model = AutoModelForCausalLM.from_pretrained(self.model_uri, quantization_config=q4_config)
+ elif bits == 8:
+ q_model = AutoModelForCausalLM.from_pretrained(self.model_uri, quantization_config=q8_config)
+ else:
+ raise ValueError("Only 4-bit and 8-bit quantization supported")
+ return q_model
+ else:
+ raise NotImplementedError(f"{self.model} cannot be quantized")
+
+
+ def qlora_finetune(self, data, optimizer, c_id, model_filename=None):
+ def async_qlora():
+ old_model = optimizer.storage.get_model(c_id)
+ if old_model is not None:
+ self.model.load_state_dict(torch.load(old_model))
+ untokenized_final_dataset = []
+ for prompt,response in data:
+ untokenized_final_dataset.append(prompt + response)
+ tokenized_final_dataset = map(self.tokenizer,untokenized_final_dataset)
+ self.tokenizer.pad_token = self.tokenizer.eos_token
+ data_collator = DataCollatorForLanguageModeling(tokenizer=self.tokenizer, mlm=False)
+ optimizer.storage.set_training_in_progress(c_id, True)
+ training_args = TrainingArguments(
+ output_dir=os.path.join(model_path_default,"finetuned_models",),
+ evaluation_strategy="epoch",
+ per_device_train_batch_size=1,
+ gradient_accumulation_steps=4,
+ warmup_steps=2,
+ max_steps=10,
+ learning_rate=2e-4,
+ fp16=True,
+ logging_steps=1,
+ optim="paged_adamw_8bit"
+ )
+ test_set = FinetuningDataset(tokenized_final_dataset,len(untokenized_final_dataset))
+
+ self.model.gradient_checkpointing_enable()
+ self.model = prepare_model_for_kbit_training(self.model)
+ config = LoraConfig(
+ r=8,
+ lora_alpha=32,
+ target_modules=["query_key_value"],
+ lora_dropout=0.05,
+ bias="none",
+ task_type="CAUSAL_LM"
+ )
+ self.model = get_peft_model(self.model, config)
+ trainer = Trainer(
+ self.model,
+ args=training_args,
+ train_dataset=test_set,
+ eval_dataset=test_set,
+ data_collator=data_collator,
+ )
+ os.makedirs(os.path.join(model_path_default,"finetuned_models", self.model_name), exist_ok=True)
+ if tokenized_final_dataset:
+ trainer.train()
+ eval_results = trainer.evaluate()
+ optimizer.storage.set_training_in_progress(c_id, False)
+
+ if os.name == "nt":
+ timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
+ else:
+ timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
+ new_model = os.path.join(model_path_default,"finetuned_models",self.model_name, timestamp + '_' + self.model_name + ".pt" ) if model_filename is None else os.path.join(model_path_default,"finetuned_models",model_filename)
+ open(new_model,"a")
+ torch.save(self.model.state_dict(), new_model) # the model in memory is different now
+ self.model_name = self.model_name + "_ft_"+ timestamp
+ optimizer.storage.set_model(c_id, new_model)
+ return math.exp(eval_results['eval_loss']) #perplexity is the metric we use for finetuning measurement
+ return async_qlora
+
def finetune_immediately(self):
self.finetune()()
+ def lora_finetune_immediately(self):
+ self.lora_finetune()()
+
+ def qlora_finetune_immediately(self):
+ self.qlora_finetune()()
+
+
+
"""
this factorization isn't necessarily the greatest, nor should it be viewed
as likely being more general, aside from covering hugging face transformers
| Implement LoRA/QLoRA for Nvidia GPUs
Can be done with `peft` and `bitsandbytes`
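For reference, a minimal sketch of that combination (a 4-bit quantized base model via `bitsandbytes`, LoRA adapters via `peft`), along the same lines as the patch above; the model id and the LoRA hyperparameters here are only illustrative:
```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Load the base model with 4-bit NF4 quantization (the QLoRA setup).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m",             # illustrative model id
    quantization_config=bnb_config,
    device_map="auto",
)

# Prepare the quantized model for training and attach low-rank adapters.
model.gradient_checkpointing_enable()
model = prepare_model_for_kbit_training(model)
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["query_key_value"],  # module names depend on the architecture
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()       # only the adapter weights are trainable
```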
| Ref: https://huggingface.co/blog/4bit-transformers-bitsandbytes
and this https://github.com/TimDettmers/bitsandbytes
@VictorOdede Can you add any references that may be helpful ?
This might also be helpful: https://github.com/huggingface/peft | 2023-10-06T12:27:25 | 0.0 | [] | [] |
||
online-judge-tools/oj | online-judge-tools__oj-880 | 0e385dbf575e0984a7dae1bb61443c0267e153bb | diff --git a/onlinejudge_command/utils.py b/onlinejudge_command/utils.py
index 5be204f7..87dfe8f7 100644
--- a/onlinejudge_command/utils.py
+++ b/onlinejudge_command/utils.py
@@ -190,7 +190,10 @@ def webbrowser_register_explorer_exe() -> None:
if not is_windows_subsystem_for_linux():
return
instance = webbrowser.GenericBrowser('explorer.exe')
- webbrowser.register('explorer', None, instance) # TODO: use `preferred=True` to solve the issue that terminal is cleared, when the version of Python becomes 3.7 or higher
+ if sys.version_info < (3, 7):
+ webbrowser.register('explorer', None, instance) # TODO: remove this after Python 3.6 supprot is finished
+ else:
+ webbrowser.register('explorer', None, instance, preferred=True) # `preferred=True` is used to solve the issue that terminal is cleared
def get_default_command() -> str:
| Terminal is cleared after submission
## Summary
> @kimiyuki_u Sorry to bother you. I'm trying to set up a competitive-programming environment on Windows + WSL following https://t.co/Kce6EfmnsN, but when I submit with online-judge-tools the terminal log disappears (it looks as if WSL is shut down and restarted?). Do you know of a possible solution?
>
> きょーとん (@kyort0n), July 15, 2021: https://twitter.com/kyort0n/status/1415482810467381251
## Steps to reproduce
1. `$ oj s ...`
environments:
- WSL 2
- Chrome
## Expected behavior
The terminal log is not cleared.
## Actual behavior
Logs at the user's terminal are cleared.
## Other notes
https://github.com/online-judge-tools/oj/pull/784#issuecomment-657091616
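For context, the fix above boils down to registering `explorer.exe` with `preferred=True` (available since Python 3.7), so that `webbrowser.open()` tries it first instead of whatever fallback was clearing the terminal. A minimal sketch, assuming a WSL environment:
```python
import webbrowser

# Windows Explorer is callable from inside WSL and hands the URL to the
# default Windows browser.
instance = webbrowser.GenericBrowser('explorer.exe')

# Since Python 3.7, preferred=True puts this browser at the head of the
# list that webbrowser.open() tries.
webbrowser.register('explorer', None, instance, preferred=True)

webbrowser.open('https://atcoder.jp/')  # opened via explorer.exe
```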
| 2021-07-15T10:10:53 | 0.0 | [] | [] |
|||
larray-project/larray | larray-project__larray-1027 | 9f121d8537fd7e6940ca09662ee8fb9831eb6c7d | diff --git a/doc/source/changes/version_0_34.rst.inc b/doc/source/changes/version_0_34.rst.inc
index 616daf24a..5d2b3b67c 100644
--- a/doc/source/changes/version_0_34.rst.inc
+++ b/doc/source/changes/version_0_34.rst.inc
@@ -66,6 +66,8 @@ Miscellaneous improvements
Fixes
^^^^^
+* fixed displaying plots made via Array.plot outside of LArray editor (closes :issue:`1019`).
+
* fixed Array.insert when no label is provided (closes :issue:`879`).
* fixed comparison between Array and None returning False instead of an array of boolean values
diff --git a/larray/core/array.py b/larray/core/array.py
index 4ba1e5f10..7b9d8835b 100644
--- a/larray/core/array.py
+++ b/larray/core/array.py
@@ -463,7 +463,7 @@ def _plot_array(array, *args, x=None, y=None, series=None, _x_axes_last=False, *
def __call__(self, x=None, y=None, ax=None, subplots=False, layout=None, figsize=None,
sharex=None, sharey=False, tight_layout=None, constrained_layout=None, title=None, legend=None,
**kwargs):
- from matplotlib.figure import Figure
+ from matplotlib import pyplot as plt
array = self.array
legend_kwargs = legend if isinstance(legend, dict) else {}
@@ -476,7 +476,7 @@ def __call__(self, x=None, y=None, ax=None, subplots=False, layout=None, figsize
if subplots:
if ax is not None:
raise ValueError("ax cannot be used in combination with subplots argument")
- fig = Figure(figsize=figsize, tight_layout=tight_layout, constrained_layout=constrained_layout)
+ fig = plt.figure(figsize=figsize, tight_layout=tight_layout, constrained_layout=constrained_layout)
num_subplots = subplot_axes.size
if layout is None:
@@ -506,7 +506,7 @@ def __call__(self, x=None, y=None, ax=None, subplots=False, layout=None, figsize
**kwargs)
else:
if ax is None:
- fig = Figure(figsize=figsize, tight_layout=tight_layout, constrained_layout=constrained_layout)
+ fig = plt.figure(figsize=figsize, tight_layout=tight_layout, constrained_layout=constrained_layout)
ax = fig.subplots(1, 1)
self._plot_array(array, x=x, y=y, series=series_axes, ax=ax, legend=False, title=title, **kwargs)
@@ -7294,7 +7294,7 @@ def plot(self) -> PlotObject:
Create a figure containing 2 x 2 graphs
>>> # see matplotlib.pyplot.subplots documentation for more details
- >>> fig, ax = plt.subplots(2, 2, figsize=(15, 15)) # doctest: +SKIP
+ >>> fig, ax = plt.subplots(2, 2, figsize=(10, 8), tight_layout=True) # doctest: +SKIP
>>> # line plot with 2 curves (Males and Females) in the top left corner (0, 0)
>>> arr.plot(ax=ax[0, 0], title='line plot') # doctest: +SKIP
>>> # bar plot with stacked values in the top right corner (0, 1)
| Array.plot() is broken
Plots produced by `Array.plot()` can no longer be displayed outside of the LArray editor (our doctests are thus wrong).
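The likely mechanism, which is what the fix switches: a `matplotlib.figure.Figure` instantiated directly is not registered with pyplot's figure manager, so a plain `plt.show()` has nothing to display, while `plt.figure()` creates a managed figure. A small illustration, independent of larray:
```python
import matplotlib.pyplot as plt
from matplotlib.figure import Figure

# Unmanaged figure: pyplot never hears about it.
fig = Figure()
ax = fig.subplots()
ax.plot([1, 2, 3])
print(plt.get_fignums())   # [] -> nothing registered
plt.show()                 # no window appears

# Managed figure: registered with pyplot's figure manager.
fig = plt.figure()
ax = fig.subplots()
ax.plot([1, 2, 3])
print(plt.get_fignums())   # [1]
plt.show()                 # the plot is displayed
```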
| 2022-09-28T15:39:07 | 0.0 | [] | [] |
|||
cancervariants/therapy-normalization | cancervariants__therapy-normalization-130 | 49313c0e0fcab5f0cc43cf1437cd75f9c43940a7 | diff --git a/setup.py b/setup.py
index 7bd0a257..2e327eba 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
"""Defines how metakb is packaged and distributed."""
from setuptools import setup
-setup(version="0.2.12")
+setup(version="0.2.13")
diff --git a/therapy/__init__.py b/therapy/__init__.py
index 5da78ed3..488d90a8 100644
--- a/therapy/__init__.py
+++ b/therapy/__init__.py
@@ -10,7 +10,7 @@
logger.setLevel(logging.DEBUG)
# TODO: Fix so that we don't have to change in setup.cfg
-__version__ = "0.2.12"
+__version__ = "0.2.13"
class DownloadException(Exception):
diff --git a/therapy/etl/chemidplus.py b/therapy/etl/chemidplus.py
index 1f5543ac..78f670de 100644
--- a/therapy/etl/chemidplus.py
+++ b/therapy/etl/chemidplus.py
@@ -75,7 +75,9 @@ def _download_data(self, data_path: Path):
logger.info('Downloaded ChemIDplus source file.')
except TimeoutError:
logger.error('Connection to EBI FTP server timed out.')
- date = ET.parse(outfile_path).getroot().attrib['date']
+
+ parser = ET.iterparse(outfile_path, ('start', 'end'))
+ date = next(parser)[1].attrib['date']
version = date.replace('-', '')
outfile_path.rename(data_path / f'chemidplus_{version}.xml')
logger.info('Finished downloading ChemIDplus data')
@@ -105,12 +107,25 @@ def _get_file(self, data_dir):
return sorted([f for f in dir_files
if f.name.startswith('chemidplus')])
+ @staticmethod
+ def parse_xml(path: Path, tag: str):
+ """Parse XML file and retrieve elements with matching tag value.
+ :param Path path: path to XML file
+ :param str tag: XML tag
+ :return: generator yielding elements of corresponding tag
+ """
+ context = iter(ET.iterparse(path, events=('start', 'end')))
+ _, root = next(context)
+ for event, elem in context:
+ if event == 'end' and elem.tag == tag:
+ yield elem
+ root.clear()
+
def _transform_data(self):
"""Open dataset and prepare for loading into database."""
- tree = ET.parse(self._data_src)
- root = tree.getroot()
+ parser = self.parse_xml(self._data_src, 'Chemical')
with self.database.therapies.batch_writer() as batch:
- for chemical in root:
+ for chemical in parser:
if 'displayName' not in chemical.attrib:
continue
| XML parsing in ChemIDplus exceeds memory limitations
Solution should be something like [this](https://stackoverflow.com/a/13261805)
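A rough sketch of that approach (incremental `ElementTree.iterparse`, clearing already-processed elements from the root so the tree never grows), which is also the direction the merged change takes; the file name and tag are illustrative:
```python
import xml.etree.ElementTree as ET

def iter_elements(path, tag):
    """Yield elements with the given tag without loading the whole tree."""
    context = ET.iterparse(path, events=("start", "end"))
    _, root = next(context)            # first event gives us the root element
    for event, elem in context:
        if event == "end" and elem.tag == tag:
            yield elem
            root.clear()               # free children that were already handled

for chemical in iter_elements("chemidplus.xml", "Chemical"):
    print(chemical.attrib.get("displayName"))
```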
| 2021-03-30T01:25:54 | 0.0 | [] | [] |
|||
MadryLab/cox | MadryLab__cox-10 | a20e7798606359356906a9950ce6f0dedccb71d8 | diff --git a/cox/tensorboard_view.py b/cox/tensorboard_view.py
index 23ef2fa..f7a18c8 100644
--- a/cox/tensorboard_view.py
+++ b/cox/tensorboard_view.py
@@ -51,7 +51,7 @@ def main():
except ValueError as ve:
pass
- cmd_to_run = f"tensorboard --logdir {tensorboard_arg_str} --port {args.port}"
+ cmd_to_run = f"tensorboard --logdir_spec {tensorboard_arg_str} --port {args.port}"
print(f"Running '{cmd_to_run}'")
os.system(cmd_to_run)
| Using cox-tensorboard gives empty tensorboard
I'm trying to use `cox-tensorboard` to view logs. The logs are there, since running `tensorboard` directly on the command line shows the plots, but that only shows the UID and doesn't show the parameters I'm interested in.
For instance, if after logging, I run
```cox-tensorboard --logdir logging --metadata-table metadata --format-str alg-{algorithm}```
it prints out
```Running 'tensorboard --logdir alg-algorithm_name---25e89eef-454a-4e86-b0ae-5be769968675:logging/25e89eef-454a-4e86-b0ae-5be769968675, --port 6006'```.
It seems like `tensorboard` uses logdir `alg-algorithm_name---25e89eef-454a-4e86-b0ae-5be769968675:logging/25e89eef-454a-4e86-b0ae-5be769968675,` which doesn't exist...
I have the same issue if running the `tb_example.py` file: I get the following `tensorboard` screen

It uses the following data location: `slope-1---542a8046-19be-495c-9df0-9063e878ca46:/tmp/cox_example/542a8046-19be-495c-9df0-9063e878ca46,slope-3---8e341d35-c566-4f0b-8576-9d886d8906fc:/tmp/cox_example/8e341d35-c566-4f0b-8576-9d886d8906fc,slope-2---d5c92731-6188-454e-8f8f-d595dcf3996c:/tmp/cox_example/d5c92731-6188-454e-8f8f-d595dcf3996c,`
| Replacing `--logdir` by `--logdir_spec` in `cox.tensorboard_view.py` fixed this error. | 2020-10-07T02:28:01 | 0.0 | [] | [] |
||
LorenFrankLab/spyglass | LorenFrankLab__spyglass-1074 | d4dbc232dcb474f037493d8bfbc28afa22ba9eaf | diff --git a/docs/src/ForDevelopers/UsingNWB.md b/docs/src/ForDevelopers/UsingNWB.md
index 3f68f930e..f64870930 100644
--- a/docs/src/ForDevelopers/UsingNWB.md
+++ b/docs/src/ForDevelopers/UsingNWB.md
@@ -144,9 +144,9 @@ ndx_franklab_novela.CameraDevice </b>
| Spyglass Table | Key | NWBfile Location | Config option | Notes |
| :------------- | :---------------: | ----------------------------------------: | -----------------------------------------: | ----: |
| Probe | probe_type | nwbf.devices.\<\*Probe>.probe_type | config\["Probe"\]\[index\]\["probe_type"\] | str |
-| Probe | probe_id | nwbf.devices.\<\*Probe>.probe_type | XXX | str |
-| Probe | manufacturer | nwbf.devices.\<\*Probe>.manufacturer | XXX | str |
-| Probe | probe_description | nwbf.devices.\<\*Probe>.probe_description | XXX | str |
+| Probe | probe_id | nwbf.devices.\<\*Probe>.probe_type | config\["Probe"\]\[index\]\["probe_type"\] | str |
+| Probe | manufacturer | nwbf.devices.\<\*Probe>.manufacturer | config\["Probe"\]\[index\]\["manufacturer"\] | str |
+| Probe | probe_description | nwbf.devices.\<\*Probe>.probe_description | config\["Probe"\]\[index\]\["description"\] | str |
| Probe | num_shanks | nwbf.devices.\<\*Probe>.num_shanks | XXX | int |
<b> NWBfile Location: nwbf.devices.\<\*Probe>.\<\*Shank> <br/> Object type:
@@ -154,16 +154,18 @@ ndx_franklab_novela.Shank </b>
| Spyglass Table | Key | NWBfile Location | Config option | Notes |
| :------------- | :---------: | ---------------------------------------------: | ------------: | ----: |
-| Probe.Shank | probe_shank | nwbf.devices.\<\*Probe>.\<\*Shank>.probe_shank | XXX | int |
+| Probe.Shank | probe_shank | nwbf.devices.\<\*Probe>.\<\*Shank>.probe_shank | config\["Probe"\]\[Shank\]\ | int | In the config, a list of ints |
<b> NWBfile Location: nwbf.devices.\<\*Probe>.\<\*Shank>.\<\*Electrode> <br/>
Object type: ndx_franklab_novela.Electrode </b>
| Spyglass Table | Key | NWBfile Location | Config option | Notes |
| :-------------- | :----------: | -------------------------------------------------------------: | ------------: | ----: |
-| Probe.Electrode | probe_shank | nwbf.devices.\<\*Probe>.\<\*Shank>.probe_shank | XXX | int |
-| Probe.Electrode | contact_size | nwbf.devices.\<\*Probe>.\<\*Shank>.\<\*Electrode>.contact_size | XXX | float |
-| Probe.Electrode | rel_x | nwbf.devices.\<\*Probe>.\<\*Shank>.\<\*Electrode>.rel_x | XXX | float |
+| Probe.Electrode | probe_shank | nwbf.devices.\<\*Probe>.\<\*Shank>.probe_shank | config\["Probe"]\["Electrode"]\[index]\["probe_shank"] | int |
+| Probe.Electrode | contact_size | nwbf.devices.\<\*Probe>.\<\*Shank>.\<\*Electrode>.contact_size | config\["Probe"]\["Electrode"]\[index]\["contact_size"] | float |
+| Probe.Electrode | rel_x | nwbf.devices.\<\*Probe>.\<\*Shank>.\<\*Electrode>.rel_x | config\["Probe"]\["Electrode"]\[index]\["rel_x"] | float |
+| Probe.Electrode | rel_y | nwbf.devices.\<\*Probe>.\<\*Shank>.\<\*Electrode>.rel_y | config\["Probe"]\["Electrode"]\[index]\["rel_y"] | float |
+| Probe.Electrode | rel_z | nwbf.devices.\<\*Probe>.\<\*Shank>.\<\*Electrode>.rel_z | config\["Probe"]\["Electrode"]\[index]\["rel_z"] | float |
<b> NWBfile Location: nwbf.epochs <br/> Object type: pynwb.epoch.TimeIntervals
</b>
@@ -213,9 +215,9 @@ hdmf.common.table.DynamicTable </b>
| :------------- | :--------------: | -----------------------------------------------: | ------------: | ----: |
| Task | task_name | nwbf.processing.tasks.\[index\].name | | |
| Task | task_description | nwbf.processing.\[index\].tasks.description | | |
-| TaskEpoch | task_name | nwbf.processing.\[index\].tasks.name | | |
-| TaskEpoch | camera_names | nwbf.processing.\[index\].tasks.camera_id | | |
-| TaskEpoch | task_environment | nwbf.processing.\[index\].tasks.task_environment | | |
+| TaskEpoch | task_name | nwbf.processing.\[index\].tasks.name | config\["Tasks"\]\[index\]\["task_name"\]| |
+| TaskEpoch | camera_names | nwbf.processing.\[index\].tasks.camera_id | config\["Tasks"\]\[index\]\["camera_id"\] | |
+| TaskEpoch | task_environment | nwbf.processing.\[index\].tasks.task_environment | config\["Tasks"\]\[index\]\["task_environment"\] | |
<b> NWBfile Location: nwbf.units </br> Object type: pynwb.misc.Units </b>
diff --git a/src/spyglass/common/common_device.py b/src/spyglass/common/common_device.py
index 19ab7ff2a..1995c2303 100644
--- a/src/spyglass/common/common_device.py
+++ b/src/spyglass/common/common_device.py
@@ -376,7 +376,9 @@ def insert_from_nwbfile(cls, nwbf, config=None):
List of probe device types found in the NWB file.
"""
config = config or dict()
- all_probes_types, ndx_probes, _ = cls.get_all_probe_names(nwbf, config)
+ all_probes_types, ndx_probes, config_probes = cls.get_all_probe_names(
+ nwbf, config
+ )
for probe_type in all_probes_types:
new_probe_type_dict = dict()
@@ -397,6 +399,16 @@ def insert_from_nwbfile(cls, nwbf, config=None):
elect_dict,
)
+ elif probe_type in config_probes:
+ cls._read_config_probe_data(
+ config,
+ probe_type,
+ new_probe_type_dict,
+ new_probe_dict,
+ shank_dict,
+ elect_dict,
+ )
+
# check that number of shanks is consistent
num_shanks = new_probe_type_dict["num_shanks"]
assert num_shanks == 0 or num_shanks == len(
@@ -405,8 +417,6 @@ def insert_from_nwbfile(cls, nwbf, config=None):
# if probe id already exists, do not overwrite anything or create
# new Shanks and Electrodes
- # TODO: test whether the Shanks and Electrodes in the NWB file match
- # the ones in the database
query = Probe & {"probe_id": new_probe_dict["probe_id"]}
if len(query) > 0:
logger.info(
@@ -414,6 +424,31 @@ def insert_from_nwbfile(cls, nwbf, config=None):
" the database. Spyglass will use that and not create a new"
" Probe, Shanks, or Electrodes."
)
+ # Test whether the Shanks and Electrodes in the NWB file match
+ # the existing database entries
+ existing_shanks = query * cls.Shank()
+ bad_shanks = [
+ shank
+ for shank in shank_dict.values()
+ if len(existing_shanks & shank) != 1
+ ]
+ if bad_shanks:
+ raise ValueError(
+ "Mismatch between nwb file and existing database "
+ + f"entry for shanks: {bad_shanks}"
+ )
+
+ existing_electrodes = query * cls.Electrode()
+ bad_electrodes = [
+ electrode
+ for electrode in elect_dict.values()
+ if len(existing_electrodes & electrode) != 1
+ ]
+ if bad_electrodes:
+ raise ValueError(
+ f"Mismatch between nwb file and existing database "
+ f"entry for electrodes: {bad_electrodes}"
+ )
continue
cls.insert1(new_probe_dict, skip_duplicates=True)
@@ -523,6 +558,66 @@ def __read_ndx_probe_data(
"rel_z": electrode.rel_z,
}
+ @classmethod
+ def _read_config_probe_data(
+ cls,
+ config,
+ probe_type,
+ new_probe_type_dict,
+ new_probe_dict,
+ shank_dict,
+ elect_dict,
+ ):
+
+ # get the list of shank keys for the probe
+ shank_list = config["Probe"][config_probes.index(probe_type)].get(
+ "Shank", []
+ )
+ for i in shank_list:
+ shank_dict[str(i)] = {"probe_id": probe_type, "probe_shank": int(i)}
+
+ # get the list of electrode keys for the probe
+ elect_dict_list = config["Probe"][config_probes.index(probe_type)].get(
+ "Electrode", []
+ )
+ for i, e in enumerate(elect_dict_list):
+ elect_dict[str(i)] = {
+ "probe_id": probe_type,
+ "probe_shank": e["probe_shank"],
+ "probe_electrode": e["probe_electrode"],
+ "contact_size": e.get("contact_size"),
+ "rel_x": e.get("rel_x"),
+ "rel_y": e.get("rel_y"),
+ "rel_z": e.get("rel_z"),
+ }
+
+ # make the probe type if not in database
+ new_probe_type_dict.update(
+ {
+ "manufacturer": config["Probe"][
+ config_probes.index(probe_type)
+ ].get("manufacturer"),
+ "probe_type": probe_type,
+ "probe_description": config["Probe"][
+ config_probes.index(probe_type)
+ ].get("probe_description"),
+ "num_shanks": len(shank_list),
+ }
+ )
+
+ cls._add_probe_type(new_probe_type_dict)
+
+ # make the probe dictionary
+ new_probe_dict.update(
+ {
+ "probe_type": probe_type,
+ "probe_id": probe_type,
+ "contact_side_numbering": config["Probe"][
+ config_probes.index(probe_type)
+ ].get("contact_side_numbering"),
+ }
+ )
+
@classmethod
def _add_probe_type(cls, new_probe_type_dict):
"""Check the probe type value against the values in the database.
diff --git a/src/spyglass/common/common_task.py b/src/spyglass/common/common_task.py
index 94ec34c58..b11b4f75a 100644
--- a/src/spyglass/common/common_task.py
+++ b/src/spyglass/common/common_task.py
@@ -7,7 +7,7 @@
from spyglass.common.common_nwbfile import Nwbfile
from spyglass.common.common_session import Session # noqa: F401
from spyglass.utils import SpyglassMixin, logger
-from spyglass.utils.nwb_helper_fn import get_nwb_file
+from spyglass.utils.nwb_helper_fn import get_config, get_nwb_file
schema = dj.schema("common_task")
@@ -105,6 +105,7 @@ def make(self, key):
nwb_file_name = key["nwb_file_name"]
nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
+ config = get_config(nwb_file_abspath, calling_table=self.camel_name)
camera_names = dict()
# the tasks refer to the camera_id which is unique for the NWB file but
@@ -116,13 +117,27 @@ def make(self, key):
# get the camera ID
camera_id = int(str.split(device.name)[1])
camera_names[camera_id] = device.camera_name
+ if device_list := config.get("CameraDevice"):
+ for device in device_list:
+ camera_names.update(
+ {
+ name: id
+ for name, id in zip(
+ device.get("camera_name"),
+ device.get("camera_id", -1),
+ )
+ }
+ )
# find the task modules and for each one, add the task to the Task
# schema if it isn't there and then add an entry for each epoch
tasks_mod = nwbf.processing.get("tasks")
- if tasks_mod is None:
- logger.warn(f"No tasks processing module found in {nwbf}\n")
+ config_tasks = config.get("Tasks")
+ if tasks_mod is None and config_tasks is None:
+ logger.warn(
+ f"No tasks processing module found in {nwbf} or config\n"
+ )
return
task_inserts = []
@@ -165,19 +180,72 @@ def make(self, key):
for epoch in task.task_epochs[0]:
# TODO in beans file, task_epochs[0] is 1x2 dset of ints,
# so epoch would be an int
-
key["epoch"] = epoch
- target_interval = str(epoch).zfill(2)
- for interval in session_intervals:
- if (
- target_interval in interval
- ): # TODO this is not true for the beans file
- break
- # TODO case when interval is not found is not handled
- key["interval_list_name"] = interval
+ target_interval = self.get_epoch_interval_name(
+ epoch, session_intervals
+ )
+ if target_interval is None:
+ logger.warn("Skipping epoch.")
+ continue
+ key["interval_list_name"] = target_interval
task_inserts.append(key.copy())
+
+ # Add tasks from config
+ for task in config_tasks:
+ new_key = {
+ **key,
+ "task_name": task.get("task_name"),
+ "task_environment": task.get("task_environment", None),
+ }
+ # add cameras
+ camera_ids = task.get("camera_id", [])
+ valid_camera_ids = [
+ camera_id
+ for camera_id in camera_ids
+ if camera_id in camera_names.keys()
+ ]
+ if valid_camera_ids:
+ new_key["camera_names"] = [
+ {"camera_name": camera_names[camera_id]}
+ for camera_id in valid_camera_ids
+ ]
+ session_intervals = (
+ IntervalList() & {"nwb_file_name": nwb_file_name}
+ ).fetch("interval_list_name")
+ for epoch in task.get("task_epochs", []):
+ new_key["epoch"] = epoch
+ target_interval = self.get_epoch_interval_name(
+ epoch, session_intervals
+ )
+ if target_interval is None:
+ logger.warn("Skipping epoch.")
+ continue
+ new_key["interval_list_name"] = target_interval
+ task_inserts.append(key.copy())
+
self.insert(task_inserts, allow_direct_insert=True)
+ @classmethod
+ def get_epoch_interval_name(cls, epoch, session_intervals):
+ """Get the interval name for a given epoch based on matching number"""
+ target_interval = str(epoch).zfill(2)
+ possible_targets = [
+ interval
+ for interval in session_intervals
+ if target_interval in interval
+ ]
+ if not possible_targets:
+ logger.warn(
+ f"Interval not found for epoch {epoch} in {nwb_file_name}."
+ )
+ elif len(possible_targets) > 1:
+ logger.warn(
+ f"Multiple intervals found for epoch {epoch} in {nwb_file_name}. "
+ + f"matches are {possible_targets}."
+ )
+ else:
+ return possible_targets[0]
+
@classmethod
def update_entries(cls, restrict=True):
existing_entries = (cls & restrict).fetch("KEY")
| No enforced integrity between geometry in `Probe` table and the spikesorting geometries used
**Describe the bug**
- During ingestion of an NWB file into spyglass, if an entry for a probe_name already exists in `sgc.Probe` for a probe in the NWB file, the session is linked to that entry rather than creating a new one.
- If there is an error in the probe geometry in the NWB file (or in the existing `sgc.Probe` entry), this can create a mismatch between the geometry defined in the NWB file and the geometry in the database.
- both v0 and v1 `SpikeSortingRecording` tables get the recording object using: ```se.read_nwb_recording(analysis_file_abs_path, load_time_vector=True)```
- Code for this in `spikeinterface==0.99.1` is [here](https://github.com/SpikeInterface/spikeinterface/blob/0.99.1/src/spikeinterface/extractors/nwbextractors.py#L578)
- This gets the geometry for the recording from the NWB file. [source](https://github.com/SpikeInterface/spikeinterface/blob/d9531abf33cbfd213ec1a0acbdca5389e47b54d8/src/spikeinterface/extractors/nwbextractors.py#L304)
- Can create mismatch between geometry in `SpikeSortingRecording` and `sgc.Probe`
**Potential Fix**
During ingestion, check for consistency between the probe geometry in the NWB file and `sgc.Probe`, and raise an error if there is an inconsistency.
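A rough sketch of what that check could look like inside `Probe.insert_from_nwbfile` (this mirrors the direction the merged fix takes; `shank_dict` and `elect_dict` are the shank/electrode entries just read from the NWB file, and each must match exactly one existing row):
```python
# Fragment of insert_from_nwbfile, reached when a probe_id already exists:
query = Probe & {"probe_id": new_probe_dict["probe_id"]}
if len(query) > 0:
    existing_shanks = query * Probe.Shank()
    bad_shanks = [s for s in shank_dict.values()
                  if len(existing_shanks & s) != 1]
    if bad_shanks:
        raise ValueError(
            f"Mismatch between nwb file and existing database entry for shanks: {bad_shanks}"
        )

    existing_electrodes = query * Probe.Electrode()
    bad_electrodes = [e for e in elect_dict.values()
                      if len(existing_electrodes & e) != 1]
    if bad_electrodes:
        raise ValueError(
            f"Mismatch between nwb file and existing database entry for electrodes: {bad_electrodes}"
        )
```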
Failed linking of camera defined by config and TaskEpoch during insert_session
**Describe the bug**
- `insert_session` should be compatible with using a config dictionary to define metadata
- `CameraDevice.insert_from_nwbfile(nwbf, config)` allows defining cameras with config
- `TaskEpoch.make` only defines valid camera devices as `ndx_franklab_novela.CameraDevice` objects in `nwb_file.devices`
- `TaskEpoch` can't be linked to a yaml-defined camera device
**Solution**
- config information needs to reach `TaskEpoch.make()`
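A rough sketch of that direction inside `TaskEpoch.make()`, following what the merged fix does (load the config that accompanies the NWB file and merge any `CameraDevice` entries it defines into the camera lookup); this is a fragment built from names in the patch, not a drop-in implementation:
```python
# Inside TaskEpoch.make(self, key), after resolving the NWB file path:
nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name)
nwbf = get_nwb_file(nwb_file_abspath)
config = get_config(nwb_file_abspath, calling_table=self.camel_name)

camera_names = {}
# Cameras defined in the NWB file itself (ndx_franklab_novela.CameraDevice).
for device in nwbf.devices.values():
    if isinstance(device, ndx_franklab_novela.CameraDevice):
        camera_id = int(str.split(device.name)[1])
        camera_names[camera_id] = device.camera_name

# Cameras that are only defined in the yaml/json config.
for device in config.get("CameraDevice", []):
    camera_names[device["camera_id"]] = device["camera_name"]
```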
|
Similar issue exists for `VideoFile._no_transaction_make()` | 2024-08-19T22:22:42 | 0.0 | [] | [] |
||
GatorGlaciology/GStatSim | GatorGlaciology__GStatSim-17 | 6f077c8bfc93b2c23afa075fd75d2303dea53297 | diff --git a/LICENSE b/LICENSE
index b073fbd..e84779c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2022 GatorGlaciology
+Copyright (c) 2022 Emma MacKie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/demos/9_cokriging_and_cosimulation_MM1.ipynb b/demos/9_cokriging_and_cosimulation_MM1.ipynb
index 202c86f..1ffac04 100644
--- a/demos/9_cokriging_and_cosimulation_MM1.ipynb
+++ b/demos/9_cokriging_and_cosimulation_MM1.ipynb
@@ -127,7 +127,7 @@
}
],
"source": [
- "df_surface = pd.read_csv('Data/greenland_surface_data.csv') # download data\n",
+ "df_surface = pd.read_csv('data/greenland_surface_data.csv') # download data\n",
"\n",
"# make hillshade plot for visualizing\n",
"res = 200\n",
@@ -635,7 +635,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.7"
+ "version": "3.10.10"
}
},
"nbformat": 4,
diff --git a/gstatsim.py b/gstatsim.py
index 096e7ab..a5f0ff4 100644
--- a/gstatsim.py
+++ b/gstatsim.py
@@ -183,12 +183,21 @@ def grid_data(df, xx, yy, zz, res):
def rbf_trend(grid_matrix, smooth_factor, res):
"""
Estimate trend using radial basis functions
- Inputs:
- grid_matrix - matrix of gridded conditioning data
- smooth_factor - regularizing parameter
- res - grid cell resolution
- Outputs:
- trend_rbf - trend estimate
+
+ Parameters
+ ----------
+ grid_matrix : numpy.ndarray
+ matrix of gridded conditioning data
+ smooth_factor : float
+ Parameter controlling smoothness of trend. Values greater than
+ zero increase the smoothness of the approximation.
+ res : float
+ grid cell resolution
+
+ Returns
+ -------
+ trend_rbf : numpy.ndarray
+ RBF trend estimate
"""
sigma = np.rint(smooth_factor/res)
ny, nx = grid_matrix.shape
@@ -455,19 +464,33 @@ def find_colocated(df1, xx1, yy1, zz1, df2, xx2, yy2, zz2):
def adaptive_partitioning(df_data, xmin, xmax, ymin, ymax, i, max_points, min_length, max_iter=None):
"""
Rercursively split clusters until they are all below max_points, but don't go smaller than min_length
- Inputs:
- df_data - DataFrame with X, Y, and K (cluster id)
- xmin - min x value of this partion
- xmax - max x value of this partion
- ymin - min y value of this partion
- ymax - max y value of this partion
- i - keeps track of total calls to this function
- max_points - all clusters will be "quartered" until points below this
- min_length - minimum side length of sqaures, preference over max_points
- max_iter - maximum iterations if worried about unending recursion
- Outputs:
- df_data - updated DataFrame with new cluster assigned the next integer
- i - number of iterations
+
+ Parameters
+ ----------
+ df_data : pandas DataFrame
+ DataFrame with X, Y, and K (cluster id) columns
+ xmin : float
+ min x value of this partion
+ xmax : float
+ max x value of this partion
+ ymin : float
+ min y value of this partion
+ ymax : float
+ max y value of this partion
+ i : int
+ keeps track of total calls to this function
+ max_points : int
+ all clusters will be "quartered" until points below this
+ min_length : float
+ minimum side length of sqaures, preference over max_points
+ max_iter : int
+ maximum iterations if worried about unending recursion
+ Returns
+ -------
+ df_data : pandas DataFrame
+ updated DataFrame with new cluster assigned
+ i : int
+ number of iterations
"""
# optional 'safety' if there is concern about runaway recursion
if max_iter is not None:
diff --git a/setup.py b/setup.py
index b8ea6a5..820bbb6 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,6 @@
from setuptools import setup
-version = '1.0.0'
+version = '1.0.2'
classifiers = [
'Programming Language :: Python :: 3',
| Reference "Data" directory should be "data"
In the first line of code block 3 of the notebook 9_cokriging_and_cosimulation_MM1.ipynb, `Data/greenland_surface_data.csv` should be changed to `data/greenland_surface_data.csv`.
Great resource by the way!
| 2023-04-27T17:50:21 | 0.0 | [] | [] |
|||
smarie/mkdocs-gallery | smarie__mkdocs-gallery-86 | 0f28700abc0c1505d5707ac20bde5e9e114ef111 | diff --git a/docs/changelog.md b/docs/changelog.md
index 0118acb1..61f58953 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -1,5 +1,9 @@
# Changelog
+### 0.7.11 - (in progress)
+
+ - Fixed for `README.md` that contains `html` comments. Fixes [#85](https://github.com/smarie/mkdocs-gallery/issues/85). PR [#86](https://github.com/smarie/mkdocs-gallery/pull/86) by [AntoineD](https://github.com/AntoineD).
+
### 0.7.10 - `sys.path` is not reset between code blocks
- `sys.path` modifications now persist across blocks of an example. `sys.path` is still reset after each example. PR [#82](https://github.com/smarie/mkdocs-gallery/pull/82) by [Louis-Pujol](https://github.com/Louis-Pujol).
diff --git a/docs/examples/no_output/README.md b/docs/examples/no_output/README.md
index 39bdb839..78c2d943 100644
--- a/docs/examples/no_output/README.md
+++ b/docs/examples/no_output/README.md
@@ -1,3 +1,6 @@
+<!--
+ Header here.
+-->
## No image output examples
This section gathers examples which don't produce any figures. Some examples
diff --git a/src/mkdocs_gallery/gen_single.py b/src/mkdocs_gallery/gen_single.py
index 8dbdbb15..55a64d7e 100644
--- a/src/mkdocs_gallery/gen_single.py
+++ b/src/mkdocs_gallery/gen_single.py
@@ -228,6 +228,9 @@ def extract_readme_title(file: Path, contents: str) -> str:
title : str
The readme title
"""
+ # Remove html comments.
+ contents = re.sub("(<!--.*?-->)", "", contents, flags=re.DOTALL)
+
match = FIRST_NON_MARKER_WITHOUT_HASH.search(contents)
if match is None:
raise ExtensionError(f"Could not find a title in readme file: {file}")
| Issue with header in README.md
When a `README.md` has a header (like a licence header) written as an HTML comment, as in this example based on one of mkdocs-gallery's own galleries, `docs/examples/no_output/README.md`:
```markdown
<!--
Header here.
-->
## No image output examples
This section gathers examples which don't produce any figures. Some examples
only output to standard output, others demonstrate how Mkdocs-Gallery handles
examples with errors.
```
The rendering is broken:

| 2023-11-22T11:25:58 | 0.0 | [] | [] |
|||
unioslo/tsd-api-client | unioslo__tsd-api-client-76 | 3ddb1078e178e91010669e4535f7c6b51fc7e007 | diff --git a/tsdapiclient/fileapi.py b/tsdapiclient/fileapi.py
index 4be06b1..3e76f16 100644
--- a/tsdapiclient/fileapi.py
+++ b/tsdapiclient/fileapi.py
@@ -432,7 +432,7 @@ def export_get(
bar = _init_export_progress_bar(unquote(filename), current_file_size, total_file_size, chunksize)
filename = filename if not target_dir else os.path.normpath(f'{target_dir}/{filename}')
destination_dir = os.path.dirname(filename)
- if not os.path.lexists(destination_dir):
+ if destination_dir and not os.path.lexists(destination_dir):
debug_step(f'creating directory: {destination_dir}')
os.makedirs(destination_dir)
with session.get(url, headers=headers, stream=True) as r:
| tacl download error
Downloading a file from TSD using tacl fails (user id and project anonymised):
```
tacl pxx --download schedule.sh
Download id: xxx....
Traceback (most recent call last):
File "/home/myuser/.local/bin/tacl", line 11, in <module>
sys.exit(cli())
File "/home/myuser/.local/lib/python3.6/site-packages/click/core.py", line 1137, in __call__
return self.main(*args, **kwargs)
File "/home/myuser/.local/lib/python3.6/site-packages/click/core.py", line 1062, in main
rv = self.invoke(ctx)
File "/home/myuser/.local/lib/python3.6/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/myuser/.local/lib/python3.6/site-packages/click/core.py", line 763, in invoke
return __callback(*args, **kwargs)
File "/home/myuser/.local/lib/python3.6/site-packages/tsdapiclient/tacl.py", line 558, in cli
export_get(env, pnum, filename, token, etag=download_id)
File "/home/myuser/.local/lib/python3.6/site-packages/tsdapiclient/tools.py", line 136, in decorator
return f(*args, **kwargs)
File "/home/myuser/.local/lib/python3.6/site-packages/tsdapiclient/fileapi.py", line 437, in export_get
os.makedirs(destination_dir)
File "/usr/lib64/python3.6/os.py", line 220, in makedirs
mkdir(name, mode)
FileNotFoundError: [Errno 2] No such file or directory: ''
```
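The traceback comes down to `os.path.dirname` returning an empty string when only a bare filename is given, which the original code then passed straight to `os.makedirs`; the fix simply skips directory creation in that case. A minimal reproduction:
```python
import os

filename = "schedule.sh"              # no target directory requested
destination_dir = os.path.dirname(filename)
print(repr(destination_dir))          # '' -> empty string

# os.makedirs('') raises FileNotFoundError, exactly as in the traceback.
if destination_dir and not os.path.lexists(destination_dir):
    os.makedirs(destination_dir)      # guarded, as in the fix
```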
| Hi, I found the issue and will release a fix asap. Thanks for reporting. | 2021-08-16T09:53:04 | 0.0 | [] | [] |
||
angr/angr-management | angr__angr-management-672 | 0622e9afbe085463a9eacb35dfd1df687b70a2f4 | diff --git a/angrmanagement/ui/views/hex_view.py b/angrmanagement/ui/views/hex_view.py
index 35cc048ac..3bf013e30 100644
--- a/angrmanagement/ui/views/hex_view.py
+++ b/angrmanagement/ui/views/hex_view.py
@@ -196,6 +196,7 @@ class HexGraphicsObject(QGraphicsObject):
"""
cursor_changed = Signal()
+ viewport_changed = Signal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -283,6 +284,7 @@ def set_display_offset_range(self, offset: HexAddress, num_rows: Optional[int] =
self.display_num_rows = num_rows or self.num_rows
self.display_end_addr = min(self.end_addr, self.display_start_addr + self.display_num_rows * 16)
self._update_layout()
+ self.viewport_changed.emit()
def move_viewport_to(self, addr: HexAddress, preserve_relative_offset: bool = False):
"""
@@ -1305,7 +1307,7 @@ def _reload_data(self):
self.inner_widget.set_display_start_addr(start)
self.inner_widget.hex.set_cursor(cursor)
self._update_highlight_regions_from_synchronized_views()
- self._update_highlight_regions_under_cursor()
+ self._update_cfb_highlight_regions()
self._set_highlighted_regions()
def _data_source_changed(self, index: int): # pylint:disable=unused-argument
@@ -1448,7 +1450,7 @@ def set_smart_highlighting_enabled(self, enable: bool):
Control whether smart highlighting is enabled or not.
"""
self.smart_highlighting_enabled = enable
- self._update_highlight_regions_under_cursor()
+ self._update_cfb_highlight_regions()
def _init_widgets(self):
"""
@@ -1492,6 +1494,7 @@ def _init_widgets(self):
lyt.addWidget(status_bar)
self.setLayout(lyt)
self.inner_widget.cursor_changed.connect(self.on_cursor_changed)
+ self.inner_widget.hex.viewport_changed.connect(self.on_cursor_changed)
self._widgets_initialized = True
@@ -1678,23 +1681,24 @@ def set_cursor(self, addr: int):
def on_cursor_changed(self):
"""
- Handle updates to cursor.
+ Handle updates to cursor or viewport.
"""
self.update_status_text()
- self._update_highlight_regions_under_cursor()
+ self._update_cfb_highlight_regions()
self.set_synchronized_cursor_address(self.inner_widget.hex.cursor)
def update_status_text(self):
"""
Update status text with current cursor info.
"""
- s = 'Address: %08x' % self.inner_widget.hex.cursor
sel = self.inner_widget.hex.get_selection()
- if sel is not None:
+ if sel:
minaddr, maxaddr = sel
bytes_selected = maxaddr - minaddr + 1
plural = "s" if bytes_selected != 1 else ""
s = f'Address: [{minaddr:08x}, {maxaddr:08x}], {bytes_selected} byte{plural} selected'
+ else:
+ s = 'Address: %08x' % self.inner_widget.hex.cursor
self._status_lbl.setText(s)
def keyPressEvent(self, event: PySide2.QtGui.QKeyEvent):
@@ -1733,60 +1737,39 @@ def on_synchronized_highlight_regions_changed(self):
"""
self._update_highlight_regions_from_synchronized_views()
- def _generate_highlight_regions_under_cursor(self) -> Sequence[HexHighlightRegion]:
- """
- Generate list of highlighted regions from CFB under cursor.
- """
- regions = []
-
- try:
- cfb = self.workspace.instance.cfb
- if cfb.am_none:
- return []
- item = cfb.floor_item(self.inner_widget.hex.cursor)
- except KeyError:
- item = None
- if item is None:
- return regions
-
- addr, item = item
- if self.inner_widget.hex.cursor >= (addr + item.size):
- return regions
-
- if isinstance(item, MemoryData):
- color = Conf.hex_view_string_color if item.sort == 'string' else Conf.hex_view_data_color
- regions.append(HexHighlightRegion(color, item.addr, item.size))
- elif isinstance(item, Block):
- for insn in item.disassembly.insns:
- regions.append(HexHighlightRegion(Conf.hex_view_instruction_color, insn.address, insn.size))
-
- return regions
-
- def _update_highlight_regions_under_cursor(self):
+ def _update_cfb_highlight_regions(self):
"""
Update cached list of highlight regions under cursor.
"""
- self._cfb_highlights = []
- if self.smart_highlighting_enabled:
- self._cfb_highlights.extend(self._generate_highlight_regions_under_cursor())
+ regions = []
+ cfb = self.workspace.instance.cfb
+ if self.smart_highlighting_enabled and not cfb.am_none:
+ for item in cfb.floor_items(self.inner_widget.hex.display_start_addr):
+ item_addr, item = item
+ if (item_addr + item.size) < self.inner_widget.hex.display_start_addr:
+ continue
+ if item_addr >= self.inner_widget.hex.display_end_addr:
+ break
+ if isinstance(item, MemoryData):
+ color = Conf.hex_view_string_color if item.sort == 'string' else Conf.hex_view_data_color
+ regions.append(HexHighlightRegion(color, item.addr, item.size))
+ elif isinstance(item, Block):
+ for insn in item.disassembly.insns:
+ regions.append(HexHighlightRegion(Conf.hex_view_instruction_color, insn.address, insn.size))
+ self._cfb_highlights = regions
self._set_highlighted_regions()
- def _generate_highlight_regions_from_synchronized_views(self) -> Sequence[HexHighlightRegion]:
+ def _update_highlight_regions_from_synchronized_views(self):
"""
- Generate list of highlighted regions from any synchronized views.
+ Update cached list of highlight regions from synchronized views.
"""
regions = []
for v in self.sync_state.highlight_regions:
if v is not self:
for r in self.sync_state.highlight_regions[v]:
regions.append(HexHighlightRegion(Qt.green, r.addr, r.size))
- return regions
- def _update_highlight_regions_from_synchronized_views(self):
- """
- Update cached list of highlight regions from synchronized views.
- """
- self._sync_view_highlights = self._generate_highlight_regions_from_synchronized_views()
+ self._sync_view_highlights = regions
self._set_highlighted_regions()
def _update_highlight_regions_from_patches(self):
| Make hex 'smart highlighting' show everything on screen
Current behavior: Context highlighting from CFB around cursor
Desired behavior: Highlight all objects in viewport
| 2022-05-09T23:07:22 | 0.0 | [] | [] |
|||
embeddings-benchmark/mteb | embeddings-benchmark__mteb-1398 | 76c772d7b326c6e36be6de23f0cfec7dc29fae5d | diff --git a/.gitignore b/.gitignore
index 321956049..868f0f174 100644
--- a/.gitignore
+++ b/.gitignore
@@ -143,4 +143,5 @@ sb.ipynb
tests/create_meta/model_card.md
# removed results from mteb repo they are now available at: https://github.com/embeddings-benchmark/results
-results/
\ No newline at end of file
+results/
+uv.lock
diff --git a/mteb/evaluation/evaluators/RetrievalEvaluator.py b/mteb/evaluation/evaluators/RetrievalEvaluator.py
index 54e2e0acd..4b2596c4d 100644
--- a/mteb/evaluation/evaluators/RetrievalEvaluator.py
+++ b/mteb/evaluation/evaluators/RetrievalEvaluator.py
@@ -188,7 +188,12 @@ def search(
cos_scores = self.score_functions[score_function](
query_embeddings, sub_corpus_embeddings
)
- cos_scores[torch.isnan(cos_scores)] = -1
+ is_nan = torch.isnan(cos_scores)
+ if is_nan.sum() > 0:
+ logger.warning(
+ f"Found {is_nan.sum()} NaN values in the similarity scores. Replacing NaN values with -1."
+ )
+ cos_scores[is_nan] = -1
# Get top-k values
cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk(
| RetrievalEvaluator NaN values for similarity scores
I am looking at RetrievalEvaluator.py and I see the line **cos_scores[torch.isnan(cos_scores)] = -1**
Basically, if a query-document similarity score has a NaN value, then it gets changed to -1.
My question is how would a NaN score appear when using cosine similarity as the distance metric? Would the query and document embedding vectors contain very small values when a NaN value appears?
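One concrete way a NaN can show up: if either embedding is an all-zero vector (for example from an invalid or empty model output), plain cosine similarity divides zero by zero. Any NaN already present in an embedding also propagates through the dot product. A tiny illustration:
```python
import torch

def cos_sim(a, b):
    # plain cosine similarity: dot(a, b) / (||a|| * ||b||)
    return (a @ b) / (a.norm() * b.norm())

query = torch.zeros(4)                     # degenerate embedding
doc = torch.tensor([0.1, 0.2, 0.3, 0.4])

print(cos_sim(query, doc))                 # tensor(nan) -> 0 / 0
```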
| My guess is that it can appear as a result of an invalid model output. @Muennighoff or @orionw might know more about it? We should probably log a warning around that statement.
When you say result of an invalid model output, are you referring to the output similarity score function or the generation of text embeddings? | 2024-11-06T12:08:40 | 0.0 | [] | [] |
||
EleutherAI/lm-evaluation-harness | EleutherAI__lm-evaluation-harness-2258 | ebe7226ebfb8d11a9fb8d6b53eb65891f895c633 | diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py
index f42965ce4c..9bd8288860 100644
--- a/lm_eval/evaluator.py
+++ b/lm_eval/evaluator.py
@@ -208,7 +208,9 @@ def simple_evaluate(
)
else:
if not isinstance(model, lm_eval.api.model.LM):
- raise TypeError
+ raise TypeError(
+ f"The value of `model` passed to simple_evaluate() was of type {type(model)}, but is required to be a subclass of lm_eval.api.model.LM . This may be because you are passing an initialized Hugging Face PreTrainedModel without having wrapped it in `lm_eval.models.huggingface.HFLM(pretrained=my_model)` first."
+ )
eval_logger.info("Using pre-initialized model")
lm = model
| Raising TypeError when using simple_evaluate()
Hi, I want to test the performance of llama3.1 using lm_eval, and I am using the [External Library Usage
](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage) to achieve this, but unfortunately it raises TypeError. Here is my code:
```python
import lm_eval
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
model = AutoModelForCausalLM.from_pretrained('/public/MountData/yaolu/LLM_pretrained/pruned_model/pruned_Llama-3.1-8B-Instruct_tail/',
trust_remote_code=True, quantization_config=bnb_config, device_map='auto'
)
lora_model = PeftModel.from_pretrained(
model,
'/public/MountData/yaolu/LLM_pretrained/pruned_model/finetuned_Qlora_alpaca_lama-3.1-8B-Instruct_tail/',
torch_dtype=torch.float16,
)
print("Applying the LoRA")
model = lora_model.merge_and_unload()
print(model)
print(type(model))
task_manager = lm_eval.tasks.TaskManager(include_path='/public/MountData/yaolu/lm-evaluation-harness/lm_eval/tasks')
results = lm_eval.simple_evaluate( # call simple_evaluate
model=model,
tasks=['arc_challenge', 'arc_easy'],
num_fewshot=0,
task_manager=task_manager,
batch_size='auto',
max_batch_size=None,
device='cuda:0',
use_cache=None,
limit=None,
check_integrity=False,
write_out=False,
gen_kwargs=None
)
```
So how can I achieve this. Thanks for your reply!
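For what it's worth, the error message added in the patch above spells out the missing step: a raw `PreTrainedModel` has to be wrapped in the harness's `HFLM` adapter before being passed to `simple_evaluate`. A minimal sketch of that wrapping, keeping the rest of the setup above unchanged:
```python
import lm_eval
from lm_eval.models.huggingface import HFLM

# `model` is the merged PeftModel / PreTrainedModel built above; a tokenizer
# can also be passed explicitly via the `tokenizer` argument if needed.
lm = HFLM(pretrained=model)

results = lm_eval.simple_evaluate(
    model=lm,                        # an lm_eval.api.model.LM subclass, as required
    tasks=["arc_challenge", "arc_easy"],
    num_fewshot=0,
    task_manager=task_manager,
)
```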
| Don't click the link or run that: this user is trying to spread malware.
> Don't click the link or run that: this user is trying to spread malware.
Thanks! | 2024-08-28T13:59:38 | 0.0 | [] | [] |
||
rytilahti/python-miio | rytilahti__python-miio-1178 | 506247e8fa69eeeb25cf9846ac02f7d36f7d9486 | diff --git a/miio/integrations/vacuum/roborock/vacuumcontainers.py b/miio/integrations/vacuum/roborock/vacuumcontainers.py
index cd343af0d..9629efa94 100644
--- a/miio/integrations/vacuum/roborock/vacuumcontainers.py
+++ b/miio/integrations/vacuum/roborock/vacuumcontainers.py
@@ -186,9 +186,11 @@ def is_on(self) -> bool:
)
@property
- def is_water_box_attached(self) -> bool:
+ def is_water_box_attached(self) -> Optional[bool]:
"""Return True is water box is installed."""
- return "water_box_status" in self.data and self.data["water_box_status"] == 1
+ if "water_box_status" in self.data:
+ return self.data["water_box_status"] == 1
+ return None
@property
def is_water_box_carriage_attached(self) -> Optional[bool]:
| vacuum: return None for is_water_box_attached when unsupported
**Describe the bug**
`is_water_box_attached` incorrectly returns a boolean on non-supported devices: https://github.com/rytilahti/python-miio/blob/master/miio/vacuumcontainers.py#L189
Thanks to @OGKevin for spotting this at https://github.com/home-assistant/core/pull/57553#issuecomment-954128524
| 2021-11-03T00:44:59 | 0.0 | [] | [] |
|||
augerai/a2ml | augerai__a2ml-575 | f9912b772b3ce36f9a4a6ac22e83a748d7906b78 | diff --git a/a2ml/__init__.py b/a2ml/__init__.py
index 2df0153c..6d7674f5 100644
--- a/a2ml/__init__.py
+++ b/a2ml/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '1.0.26'
+__version__ = '1.0.27'
diff --git a/a2ml/api/auger/impl/cloud/experiment.py b/a2ml/api/auger/impl/cloud/experiment.py
index 45a6c715..0b9da4e1 100644
--- a/a2ml/api/auger/impl/cloud/experiment.py
+++ b/a2ml/api/auger/impl/cloud/experiment.py
@@ -112,6 +112,13 @@ def get_experiment_options(config, ):
if split_options:
options['splitOptions'] = split_options
+ if config.get('review/roi/filter'):
+ options['roi_metric'] = {
+ 'filter': str(config.get('review/roi/filter')),
+ 'revenue': str(config.get('review/roi/revenue')),
+ 'investment': str(config.get('review/roi/investment'))
+ }
+
return options
def get_experiment_settings(self):
diff --git a/a2ml/api/roi/calculator.py b/a2ml/api/roi/calculator.py
index d05ca02b..b88a219b 100644
--- a/a2ml/api/roi/calculator.py
+++ b/a2ml/api/roi/calculator.py
@@ -1,10 +1,10 @@
import pandas as pd
-from a2ml.api.roi.interpreter import Interpreter
-from a2ml.api.roi.lexer import Lexer
-from a2ml.api.roi.parser import Parser
-from a2ml.api.roi.validator import Validator
-from a2ml.api.roi.var_names_fetcher import VarNamesFetcher
+from .interpreter import Interpreter
+from .lexer import Lexer
+from .parser import Parser
+from .validator import Validator
+from .var_names_fetcher import VarNamesFetcher
class Calculator:
diff --git a/a2ml/api/roi/interpreter.py b/a2ml/api/roi/interpreter.py
index d2d0ac91..aad884c4 100644
--- a/a2ml/api/roi/interpreter.py
+++ b/a2ml/api/roi/interpreter.py
@@ -4,9 +4,9 @@
from operator import attrgetter
from itertools import groupby
-from a2ml.api.roi.base_interpreter import BaseInterpreter
-from a2ml.api.roi.lexer import AstError, Token
-from a2ml.api.roi.validator import Validator
+from .base_interpreter import BaseInterpreter
+from .lexer import AstError, Token
+from .validator import Validator
class InterpreterError(AstError):
pass
diff --git a/a2ml/api/roi/parser.py b/a2ml/api/roi/parser.py
index 218e9b3b..e37ea29e 100644
--- a/a2ml/api/roi/parser.py
+++ b/a2ml/api/roi/parser.py
@@ -1,4 +1,4 @@
-from a2ml.api.roi.lexer import AstError, Token
+from .lexer import AstError, Token
class ParserError(AstError):
pass
diff --git a/a2ml/api/roi/validator.py b/a2ml/api/roi/validator.py
index b9840294..072dc74a 100644
--- a/a2ml/api/roi/validator.py
+++ b/a2ml/api/roi/validator.py
@@ -1,6 +1,6 @@
-from a2ml.api.roi.base_interpreter import BaseInterpreter
-from a2ml.api.roi.lexer import AstError, Lexer
-from a2ml.api.roi.parser import Parser, TopNode
+from .base_interpreter import BaseInterpreter
+from .lexer import AstError, Lexer
+from .parser import Parser, TopNode
class ValidationError(AstError):
pass
diff --git a/a2ml/api/roi/var_names_fetcher.py b/a2ml/api/roi/var_names_fetcher.py
index e2cbe154..2bbeb87c 100644
--- a/a2ml/api/roi/var_names_fetcher.py
+++ b/a2ml/api/roi/var_names_fetcher.py
@@ -1,6 +1,6 @@
-from a2ml.api.roi.base_interpreter import BaseInterpreter
-from a2ml.api.roi.lexer import AstError, Lexer
-from a2ml.api.roi.parser import Parser, TopNode
+from .base_interpreter import BaseInterpreter
+from .lexer import AstError, Lexer
+from .parser import Parser, TopNode
class VarNamesFetcher(BaseInterpreter):
def __init__(self, expression):
diff --git a/a2ml/api/utils/context.py b/a2ml/api/utils/context.py
index 3c4899f0..16762ee4 100644
--- a/a2ml/api/utils/context.py
+++ b/a2ml/api/utils/context.py
@@ -82,7 +82,7 @@ def get_providers(self, provider = None):
if provider:
providers = provider
else:
- providers = self.config.get('providers', [])
+ providers = self.config.get('providers', ['auger'])
if isinstance(providers, (str,)):
providers = [p.strip() for p in providers.split(',')]
@@ -105,7 +105,8 @@ def get_model_provider(self, model_id):
return "auger"
def is_external_provider(self):
- return self.config.get_list('providers')[0] == 'external'
+ providers = self.get_providers()
+ return providers and providers[0] == 'external'
def copy(self, name):
"""creates a copy of an existing Context
@@ -135,7 +136,7 @@ def copy(self, name):
# In case if command run in folder without config, do not set it
pass
- if self._runs_on_server and hasattr(self, 'credentials'):
+ if hasattr(self, 'credentials'):
new.credentials = self.credentials
return new
diff --git a/setup.py b/setup.py
index 4259b613..c2faa7a6 100644
--- a/setup.py
+++ b/setup.py
@@ -86,7 +86,7 @@ def run(self):
'google-cloud-automl'
],
'predict': [
- 'auger.ai.predict==1.0.73'
+ 'auger.ai.predict==1.0.75'
]
}
| WIP: Move api to auger.ai repo
Moving all underlying auger api code to the auger.ai repo
| 2021-07-02T06:37:27 | 0.0 | [] | [] |
|||
CNFeffery/feffery-antd-components | CNFeffery__feffery-antd-components-182 | d01681981f717bcc5711bb6d5d55ecbe09a29dc9 | diff --git a/feffery_antd_components/AntdDatePicker.py b/feffery_antd_components/AntdDatePicker.py
index 3e69a953..be0e4996 100644
--- a/feffery_antd_components/AntdDatePicker.py
+++ b/feffery_antd_components/AntdDatePicker.py
@@ -85,6 +85,9 @@ class AntdDatePicker(Component):
- name (string; optional):
    Used by AntdForm's automatic form-value collection: it acts as the field
    name of the current form item, falling back to id when not provided.
+ - needConfirm (boolean; default False):
+     Whether a confirmation button is required; when `False`, losing focus
+     confirms the selection. Defaults to `False`.
+
- persisted_props (list of a value equal to: 'value's; default ['value']):
Properties whose user interactions will persist after refreshing
the component or the page. Since only `value` is allowed this
@@ -154,10 +157,10 @@ class AntdDatePicker(Component):
_namespace = 'feffery_antd_components'
_type = 'AntdDatePicker'
@_explicitize_args
- def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, format=Component.UNDEFINED, picker=Component.UNDEFINED, firstDayOfWeek=Component.UNDEFINED, disabled=Component.UNDEFINED, showTime=Component.UNDEFINED, size=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, defaultPickerValue=Component.UNDEFINED, disabledDatesStrategy=Component.UNDEFINED, status=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, showToday=Component.UNDEFINED, presets=Component.UNDEFINED, clickedPreset=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
- self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'showToday', 'size', 'status', 'style', 'value', 'variant']
+ def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, format=Component.UNDEFINED, picker=Component.UNDEFINED, firstDayOfWeek=Component.UNDEFINED, disabled=Component.UNDEFINED, showTime=Component.UNDEFINED, size=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, defaultPickerValue=Component.UNDEFINED, disabledDatesStrategy=Component.UNDEFINED, status=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, showToday=Component.UNDEFINED, presets=Component.UNDEFINED, clickedPreset=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, needConfirm=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
+ self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'needConfirm', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'showToday', 'size', 'status', 'style', 'value', 'variant']
self._valid_wildcard_attributes = ['data-', 'aria-']
- self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'showToday', 'size', 'status', 'style', 'value', 'variant']
+ self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'needConfirm', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'showToday', 'size', 'status', 'style', 'value', 'variant']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
diff --git a/feffery_antd_components/AntdDateRangePicker.py b/feffery_antd_components/AntdDateRangePicker.py
index 75a71b6a..a08637a2 100644
--- a/feffery_antd_components/AntdDateRangePicker.py
+++ b/feffery_antd_components/AntdDateRangePicker.py
@@ -85,6 +85,9 @@ class AntdDateRangePicker(Component):
- name (string; optional):
Used by the AntdForm-based automatic form-value collection: when the field name of the current form item is omitted, the id is used as the field name.
+- needConfirm (boolean; default False):
+ Whether a confirm button is required; when `False`, losing focus confirms the selection. Defaults to `False`.
+
- open (boolean; optional)
- persisted_props (list of a value equal to: 'value's; default ['value']):
@@ -154,10 +157,10 @@ class AntdDateRangePicker(Component):
_namespace = 'feffery_antd_components'
_type = 'AntdDateRangePicker'
@_explicitize_args
- def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, format=Component.UNDEFINED, picker=Component.UNDEFINED, firstDayOfWeek=Component.UNDEFINED, disabled=Component.UNDEFINED, showTime=Component.UNDEFINED, size=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, defaultPickerValue=Component.UNDEFINED, disabledDatesStrategy=Component.UNDEFINED, open=Component.UNDEFINED, status=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, presets=Component.UNDEFINED, clickedPreset=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
- self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'open', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'size', 'status', 'style', 'value', 'variant']
+ def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, format=Component.UNDEFINED, picker=Component.UNDEFINED, firstDayOfWeek=Component.UNDEFINED, disabled=Component.UNDEFINED, showTime=Component.UNDEFINED, size=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, defaultPickerValue=Component.UNDEFINED, disabledDatesStrategy=Component.UNDEFINED, open=Component.UNDEFINED, status=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, presets=Component.UNDEFINED, clickedPreset=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, needConfirm=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
+ self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'needConfirm', 'open', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'size', 'status', 'style', 'value', 'variant']
self._valid_wildcard_attributes = ['data-', 'aria-']
- self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'open', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'size', 'status', 'style', 'value', 'variant']
+ self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'clickedPreset', 'data-*', 'defaultPickerValue', 'defaultValue', 'disabled', 'disabledDatesStrategy', 'extraFooter', 'firstDayOfWeek', 'format', 'key', 'loading_state', 'locale', 'name', 'needConfirm', 'open', 'persisted_props', 'persistence', 'persistence_type', 'picker', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'presets', 'readOnly', 'showTime', 'size', 'status', 'style', 'value', 'variant']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
diff --git a/feffery_antd_components/AntdTimePicker.py b/feffery_antd_components/AntdTimePicker.py
index 004e044e..0a9da916 100644
--- a/feffery_antd_components/AntdTimePicker.py
+++ b/feffery_antd_components/AntdTimePicker.py
@@ -64,6 +64,9 @@ class AntdTimePicker(Component):
- name (string; optional):
Used by the AntdForm-based automatic form-value collection: when the field name of the current form item is omitted, the id is used as the field name.
+- needConfirm (boolean; default False):
+ Whether a confirm button is required; when `False`, losing focus confirms the selection. Defaults to `False`.
+
- persisted_props (list of a value equal to: 'value's; default ['value']):
Properties whose user interactions will persist after refreshing
the component or the page. Since only `value` is allowed this
@@ -116,10 +119,10 @@ class AntdTimePicker(Component):
_namespace = 'feffery_antd_components'
_type = 'AntdTimePicker'
@_explicitize_args
- def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, format=Component.UNDEFINED, disabled=Component.UNDEFINED, hourStep=Component.UNDEFINED, minuteStep=Component.UNDEFINED, secondStep=Component.UNDEFINED, use12Hours=Component.UNDEFINED, size=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, status=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, showNow=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
- self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'showNow', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
+ def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, format=Component.UNDEFINED, disabled=Component.UNDEFINED, hourStep=Component.UNDEFINED, minuteStep=Component.UNDEFINED, secondStep=Component.UNDEFINED, use12Hours=Component.UNDEFINED, size=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, status=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, showNow=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, needConfirm=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
+ self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'needConfirm', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'showNow', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
self._valid_wildcard_attributes = ['data-', 'aria-']
- self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'showNow', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
+ self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'needConfirm', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'showNow', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
diff --git a/feffery_antd_components/AntdTimeRangePicker.py b/feffery_antd_components/AntdTimeRangePicker.py
index 79ec6c43..511ea9a9 100644
--- a/feffery_antd_components/AntdTimeRangePicker.py
+++ b/feffery_antd_components/AntdTimeRangePicker.py
@@ -64,6 +64,9 @@ class AntdTimeRangePicker(Component):
- name (string; optional):
Used by the AntdForm-based automatic form-value collection: when the field name of the current form item is omitted, the id is used as the field name.
+- needConfirm (boolean; default False):
+ Whether a confirm button is required; when `False`, losing focus confirms the selection. Defaults to `False`.
+
- open (boolean; optional)
- persisted_props (list of a value equal to: 'value's; default ['value']):
@@ -116,10 +119,10 @@ class AntdTimeRangePicker(Component):
_namespace = 'feffery_antd_components'
_type = 'AntdTimeRangePicker'
@_explicitize_args
- def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, hourStep=Component.UNDEFINED, minuteStep=Component.UNDEFINED, secondStep=Component.UNDEFINED, format=Component.UNDEFINED, use12Hours=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, disabled=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, size=Component.UNDEFINED, open=Component.UNDEFINED, status=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
- self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'open', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
+ def __init__(self, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, popupClassName=Component.UNDEFINED, key=Component.UNDEFINED, name=Component.UNDEFINED, locale=Component.UNDEFINED, hourStep=Component.UNDEFINED, minuteStep=Component.UNDEFINED, secondStep=Component.UNDEFINED, format=Component.UNDEFINED, use12Hours=Component.UNDEFINED, allowClear=Component.UNDEFINED, autoFocus=Component.UNDEFINED, placeholder=Component.UNDEFINED, placement=Component.UNDEFINED, disabled=Component.UNDEFINED, value=Component.UNDEFINED, defaultValue=Component.UNDEFINED, bordered=Component.UNDEFINED, variant=Component.UNDEFINED, size=Component.UNDEFINED, open=Component.UNDEFINED, status=Component.UNDEFINED, readOnly=Component.UNDEFINED, extraFooter=Component.UNDEFINED, popupContainer=Component.UNDEFINED, batchPropsNames=Component.UNDEFINED, batchPropsValues=Component.UNDEFINED, needConfirm=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
+ self._prop_names = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'needConfirm', 'open', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
self._valid_wildcard_attributes = ['data-', 'aria-']
- self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'open', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
+ self.available_properties = ['id', 'allowClear', 'aria-*', 'autoFocus', 'batchPropsNames', 'batchPropsValues', 'bordered', 'className', 'data-*', 'defaultValue', 'disabled', 'extraFooter', 'format', 'hourStep', 'key', 'loading_state', 'locale', 'minuteStep', 'name', 'needConfirm', 'open', 'persisted_props', 'persistence', 'persistence_type', 'placeholder', 'placement', 'popupClassName', 'popupContainer', 'readOnly', 'secondStep', 'size', 'status', 'style', 'use12Hours', 'value', 'variant']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
diff --git a/src/lib/components/dataEntry/AntdDatePicker.react.js b/src/lib/components/dataEntry/AntdDatePicker.react.js
index 3ea06d7b..86294748 100644
--- a/src/lib/components/dataEntry/AntdDatePicker.react.js
+++ b/src/lib/components/dataEntry/AntdDatePicker.react.js
@@ -192,6 +192,12 @@ AntdDatePicker.propTypes = {
*/
'aria-*': PropTypes.string,
+ /**
+ * Whether a confirm button is required; when `false`, losing focus confirms the selection
+ * Defaults to `false`
+ */
+ needConfirm: PropTypes.bool,
+
/**
* Object that holds the loading state object coming from dash-renderer
*/
@@ -258,6 +264,7 @@ AntdDatePicker.defaultProps = {
locale: 'zh-cn',
placement: 'bottomLeft',
showToday: true,
+ needConfirm: false,
popupContainer: 'body',
persisted_props: ['value'],
persistence_type: 'local',
diff --git a/src/lib/components/dataEntry/AntdDateRangePicker.react.js b/src/lib/components/dataEntry/AntdDateRangePicker.react.js
index f58d81df..c4410bed 100644
--- a/src/lib/components/dataEntry/AntdDateRangePicker.react.js
+++ b/src/lib/components/dataEntry/AntdDateRangePicker.react.js
@@ -196,6 +196,12 @@ AntdDateRangePicker.propTypes = {
*/
'aria-*': PropTypes.string,
+ /**
+ * Whether a confirm button is required; when `false`, losing focus confirms the selection
+ * Defaults to `false`
+ */
+ needConfirm: PropTypes.bool,
+
/**
* Object that holds the loading state object coming from dash-renderer
*/
@@ -259,6 +265,7 @@ AntdDateRangePicker.defaultProps = {
bordered: true,
allowClear: true,
autoFocus: false,
+ needConfirm: false,
persisted_props: ['value'],
persistence_type: 'local',
locale: 'zh-cn',
diff --git a/src/lib/components/dataEntry/AntdTimePicker.react.js b/src/lib/components/dataEntry/AntdTimePicker.react.js
index 68574d0d..9eb94178 100644
--- a/src/lib/components/dataEntry/AntdTimePicker.react.js
+++ b/src/lib/components/dataEntry/AntdTimePicker.react.js
@@ -128,6 +128,12 @@ AntdTimePicker.propTypes = {
*/
'aria-*': PropTypes.string,
+ /**
+ * Whether a confirm button is required; when `false`, losing focus confirms the selection
+ * Defaults to `false`
+ */
+ needConfirm: PropTypes.bool,
+
/**
* Object that holds the loading state object coming from dash-renderer
*/
@@ -196,6 +202,7 @@ AntdTimePicker.defaultProps = {
bordered: true,
size: 'middle',
showNow: true,
+ needConfirm: false,
persisted_props: ['value'],
persistence_type: 'local',
locale: 'zh-cn',
diff --git a/src/lib/components/dataEntry/AntdTimeRangePicker.react.js b/src/lib/components/dataEntry/AntdTimeRangePicker.react.js
index c93e6824..ba10134e 100644
--- a/src/lib/components/dataEntry/AntdTimeRangePicker.react.js
+++ b/src/lib/components/dataEntry/AntdTimeRangePicker.react.js
@@ -127,6 +127,12 @@ AntdTimeRangePicker.propTypes = {
*/
'aria-*': PropTypes.string,
+ /**
+ * Whether a confirm button is required; when `false`, losing focus confirms the selection
+ * Defaults to `false`
+ */
+ needConfirm: PropTypes.bool,
+
/**
* Object that holds the loading state object coming from dash-renderer
*/
@@ -193,6 +199,7 @@ AntdTimeRangePicker.defaultProps = {
bordered: true,
size: 'middle',
format: 'HH:mm:ss',
+ needConfirm: false,
persisted_props: ['value'],
persistence_type: 'local',
locale: 'zh-cn',
diff --git a/src/lib/fragments/dataEntry/AntdDatePicker.react.js b/src/lib/fragments/dataEntry/AntdDatePicker.react.js
index aeb3f0a8..edac3af1 100644
--- a/src/lib/fragments/dataEntry/AntdDatePicker.react.js
+++ b/src/lib/fragments/dataEntry/AntdDatePicker.react.js
@@ -55,7 +55,8 @@ const AntdDatePicker = (props) => {
persisted_props,
persistence_type,
loading_state,
- batchPropsNames
+ batchPropsNames,
+ needConfirm
} = props;
const [rawValue, setRawValue] = useState(null);
@@ -488,6 +489,7 @@ const AntdDatePicker = (props) => {
inputReadOnly={readOnly}
renderExtraFooter={() => extraFooter}
showNow={showToday}
+ needConfirm={needConfirm}
presets={
// Handle the list of preset shortcut options
(presets || []).map(
diff --git a/src/lib/fragments/dataEntry/AntdDateRangePicker.react.js b/src/lib/fragments/dataEntry/AntdDateRangePicker.react.js
index 8d89c257..59f99a09 100644
--- a/src/lib/fragments/dataEntry/AntdDateRangePicker.react.js
+++ b/src/lib/fragments/dataEntry/AntdDateRangePicker.react.js
@@ -57,7 +57,8 @@ const AntdDateRangePicker = (props) => {
persisted_props,
persistence_type,
loading_state,
- batchPropsNames
+ batchPropsNames,
+ needConfirm
} = props;
const [rawValue, setRawValue] = useState(null);
@@ -511,6 +512,7 @@ const AntdDateRangePicker = (props) => {
onOpenChange={(e) => setProps({ open: e })}
inputReadOnly={readOnly}
renderExtraFooter={() => extraFooter}
+ needConfirm={needConfirm}
presets={
// Handle the list of preset shortcut options
(presets || []).map(
diff --git a/src/lib/fragments/dataEntry/AntdTimePicker.react.js b/src/lib/fragments/dataEntry/AntdTimePicker.react.js
index 1373324f..56beccb9 100644
--- a/src/lib/fragments/dataEntry/AntdTimePicker.react.js
+++ b/src/lib/fragments/dataEntry/AntdTimePicker.react.js
@@ -47,7 +47,8 @@ const AntdTimePicker = (props) => {
persisted_props,
persistence_type,
loading_state,
- batchPropsNames
+ batchPropsNames,
+ needConfirm
} = props;
// Listen for batched prop updates
@@ -158,6 +159,7 @@ const AntdTimePicker = (props) => {
status={status}
renderExtraFooter={() => extraFooter}
showNow={showNow}
+ needConfirm={needConfirm}
persistence={persistence}
persisted_props={persisted_props}
persistence_type={persistence_type}
diff --git a/src/lib/fragments/dataEntry/AntdTimeRangePicker.react.js b/src/lib/fragments/dataEntry/AntdTimeRangePicker.react.js
index 1a3caa49..cba51abb 100644
--- a/src/lib/fragments/dataEntry/AntdTimeRangePicker.react.js
+++ b/src/lib/fragments/dataEntry/AntdTimeRangePicker.react.js
@@ -48,7 +48,8 @@ const AntdTimeRangePicker = (props) => {
persisted_props,
persistence_type,
loading_state,
- batchPropsNames
+ batchPropsNames,
+ needConfirm
} = props;
// Listen for batched prop updates
@@ -178,6 +179,7 @@ const AntdTimeRangePicker = (props) => {
}
status={status}
renderExtraFooter={() => extraFooter}
+ needConfirm={needConfirm}
persistence={persistence}
persisted_props={persisted_props}
persistence_type={persistence_type}
diff --git a/usage.py b/usage.py
index 339f3058..2384f9c0 100644
--- a/usage.py
+++ b/usage.py
@@ -6,14 +6,10 @@
app.layout = html.Div(
[
- fac.AntdCarousel(
- [
- fac.AntdImage(
- src='https://fac.feffery.tech//assets/imgs/%E6%B5%81%E6%B5%AA%E5%9C%B0%E7%90%832%E6%B5%B7%E6%8A%A5.jpg'
- ) for i in range(3)
- ],
- arrows=True
- )
+ fac.AntdDatePicker(needConfirm=True),
+ fac.AntdDateRangePicker(needConfirm=True),
+ fac.AntdTimePicker(needConfirm=True),
+ fac.AntdTimeRangePicker(needConfirm=True)
],
style={'padding': '50px 100px'},
)
| [TODO] Add a needConfirm parameter to the date-picker and time-picker components
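For context, a minimal Dash app exercising the new parameter could look like the sketch below. It mirrors the `usage.py` change in the patch above; the layout styling and the run call are illustrative rather than part of the original report.

```python
from dash import Dash, html
import feffery_antd_components as fac

app = Dash(__name__)

# Each picker opts into the confirm button introduced by this patch.
app.layout = html.Div(
    [
        fac.AntdDatePicker(needConfirm=True),
        fac.AntdDateRangePicker(needConfirm=True),
        fac.AntdTimePicker(needConfirm=True),
        fac.AntdTimeRangePicker(needConfirm=True),
    ],
    style={'padding': '50px 100px'},
)

if __name__ == '__main__':
    app.run(debug=True)
```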
| 2024-05-14T02:12:48 | 0.0 | [] | [] |
|||
pymodbus-dev/pymodbus | pymodbus-dev__pymodbus-2369 | 317f3c859fcb82b3481592289f930859aad58f59 | diff --git a/pymodbus/client/mixin.py b/pymodbus/client/mixin.py
index 995d72110..1c7351c3b 100644
--- a/pymodbus/client/mixin.py
+++ b/pymodbus/client/mixin.py
@@ -111,7 +111,7 @@ def write_coil(self, address: int, value: bool, slave: int = 1) -> T:
"""
return self.execute(pdu_bit_write.WriteSingleCoilRequest(address, value, slave=slave))
- def write_register(self, address: int, value: bytes, slave: int = 1) -> T:
+ def write_register(self, address: int, value: bytes | int, slave: int = 1) -> T:
"""Write register (code 0x06).
:param address: Address to write to
@@ -322,7 +322,7 @@ def write_coils(
)
def write_registers(
- self, address: int, values: list[bytes], slave: int = 1, skip_encode: bool = False) -> T:
+ self, address: int, values: list[bytes | int], slave: int = 1, skip_encode: bool = False) -> T:
"""Write registers (code 0x10).
:param address: Start address to write to
diff --git a/pymodbus/pdu/register_write_message.py b/pymodbus/pdu/register_write_message.py
index 6a1abb3aa..352276700 100644
--- a/pymodbus/pdu/register_write_message.py
+++ b/pymodbus/pdu/register_write_message.py
@@ -36,7 +36,7 @@ def encode(self):
:returns: The encoded packet
"""
packet = struct.pack(">H", self.address)
- if self.skip_encode:
+ if self.skip_encode or isinstance(self.value, bytes):
packet += self.value
else:
packet += struct.pack(">H", self.value)
@@ -178,7 +178,10 @@ def encode(self):
return packet + b"".join(self.values)
for value in self.values:
- packet += struct.pack(">H", value)
+ if isinstance(value, bytes):
+ packet += value
+ else:
+ packet += struct.pack(">H", value)
return packet
| struct.error: required argument is not an integer
### Versions
- Python: 3.12
- OS: macOS 15
- Pymodbus: 3.7.3
- Modbus Hardware (if used):
### Description
The type of the Client methods `write_register` (and `write_registers`) was changed in `client/mixin.py` (#2309). Now the value to be sent should be of type `bytes` instead of `int` as in the previous releases.
Converting the int `value` to bytes using `value.to_bytes(2, 'big')` makes mypy happy, but results in
```
File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pymodbus/client/base.py", line 104, in async_execute
packet = self.ctx.framer.buildPacket(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pymodbus/framer/base.py", line 85, in buildPacket
data = message.function_code.to_bytes(1,'big') + message.encode()
^^^^^^^^^^^^^^^^
File "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pymodbus/pdu/register_write_message.py", line 42, in encode
packet += struct.pack(">H", self.value)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
struct.error: required argument is not an integer
```
Sending the int value as before works, but makes mypy sad.
What to do?
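A short sketch of the two call styles being discussed, assuming the patch above is applied (the host address and register values are hypothetical):

```python
from pymodbus.client import ModbusTcpClient

client = ModbusTcpClient("192.168.0.10")  # hypothetical device
client.connect()

# A plain int still encodes as before and satisfies the bytes | int annotation.
client.write_register(0x10, 1234, slave=1)

# Pre-encoded bytes are now passed through by encode() instead of re-packed.
client.write_register(0x10, (1234).to_bytes(2, "big"), slave=1)

# Mixed lists work the same way for write_registers.
client.write_registers(0x20, [1, 2, (3).to_bytes(2, "big")], slave=1)

client.close()
```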
| 2024-10-10T10:31:03 | 0.0 | [] | [] |
|||
quic/aimet | quic__aimet-2736 | e69108929d90e49ef49a1b6781bfe190b9ade518 | diff --git a/TrainingExtensions/torch/src/python/aimet_torch/qc_quantize_op.py b/TrainingExtensions/torch/src/python/aimet_torch/qc_quantize_op.py
index 8bb424a849..9116a341d0 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/qc_quantize_op.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/qc_quantize_op.py
@@ -359,12 +359,14 @@ def enable_per_channel_quantization(self):
supported for learned-grid
"""
- def set_activation_encoding(self, module_name: str, activation_encodings: Dict):
+ def set_activation_encoding(self, module_name: str, activation_encodings: Dict, ignore_when_quantizer_disabled: bool = False):
"""
Set encoding for activations from encodings dictionary
:param module_name: name of module
:param activation_encodings: activation encodings dictionary
+ :param ignore_when_quantizer_disabled: ignore raising RuntimeError while setting encodings,
+ when quantizers are disabled.
"""
_logger.info("Setting quantization encodings for activation quantizers of: %s", module_name)
@@ -373,14 +375,14 @@ def set_activation_encoding(self, module_name: str, activation_encodings: Dict):
except KeyError:
input_encoding = {}
- self.import_input_encodings(input_encoding)
+ self.import_input_encodings(input_encoding, ignore_when_quantizer_disabled)
try:
output_encoding = activation_encodings[module_name]['output']
except KeyError:
output_encoding = {}
- self.import_output_encodings(output_encoding)
+ self.import_output_encodings(output_encoding, ignore_when_quantizer_disabled)
def set_param_encoding(self, module_name: str, param_encodings: Dict):
"""
@@ -416,10 +418,10 @@ def freeze_activation_encoding(self, name: str, activation_encoding: Dict):
"""
for input_quantizer, output_quantizer in zip(self.input_quantizers, self.output_quantizers):
if name in activation_encoding:
- if QUANTIZER_TYPE_INPUT in activation_encoding[name]:
+ if QUANTIZER_TYPE_INPUT in activation_encoding[name] and input_quantizer.enabled:
input_quantizer.freeze_encoding()
_logger.info("Freezing quantization encodings for input activation: %s", name)
- if QUANTIZER_TYPE_OUTPUT in activation_encoding[name]:
+ if QUANTIZER_TYPE_OUTPUT in activation_encoding[name] and output_quantizer.enabled:
output_quantizer.freeze_encoding()
_logger.info("Freezing quantization encodings for output activation: %s", name)
@@ -501,7 +503,7 @@ def import_param_encodings(self, encodings: Dict[str, List[Dict]]):
self.set_mode(QcQuantizeOpMode.ACTIVE)
- def import_output_encodings(self, encodings: Dict[str, Dict]):
+ def import_output_encodings(self, encodings: Dict[str, Dict], ignore_when_quantizer_disabled: bool = False):
"""
Import output encodings represented in below format:
{
@@ -510,9 +512,9 @@ def import_output_encodings(self, encodings: Dict[str, Dict]):
...
}
"""
- self._import_encoding(encodings, self.output_quantizers)
+ self._import_encoding(encodings, self.output_quantizers, ignore_when_quantizer_disabled)
- def import_input_encodings(self, encodings: Dict[str, Dict]):
+ def import_input_encodings(self, encodings: Dict[str, Dict], ignore_when_quantizer_disabled: bool = False):
"""
Import input encodings represented in below format:
{
@@ -521,9 +523,9 @@ def import_input_encodings(self, encodings: Dict[str, Dict]):
...
}
"""
- self._import_encoding(encodings, self.input_quantizers)
+ self._import_encoding(encodings, self.input_quantizers, ignore_when_quantizer_disabled)
- def _import_encoding(self, encodings, quantizers):
+ def _import_encoding(self, encodings, quantizers, ignore_when_quantizer_disabled):
assert quantizers is self.input_quantizers or quantizers is self.output_quantizers
for i, quantizer in enumerate(quantizers):
@@ -532,8 +534,15 @@ def _import_encoding(self, encodings, quantizers):
quantizer.enabled = False
continue
if not quantizer.enabled:
- raise RuntimeError("The quantsim passed for loading encodings does not have the same "
- "configuration as the quantsim which was used to export the encodings")
+ if ignore_when_quantizer_disabled:
+ type_of_quantizer = 'input' if quantizers is self.input_quantizers else 'output'
+ _logger.info("%s quantizer %s is disabled, and the provided encoding can't be set",
+ type_of_quantizer, str(i))
+ continue
+ else:
+ raise RuntimeError("The quantsim passed for loading encodings does not have the same "
+ "configuration as the quantsim which was used to export the encodings")
+
if quantizer._is_encoding_frozen: # pylint: disable=protected-access
type_of_quantizer = 'input' if quantizers is self.input_quantizers else 'output'
_logger.debug("Encodings are frozen for module %s quantizer of %s",
diff --git a/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py b/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py
index 7e24400d58..11ab7daef8 100644
--- a/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py
+++ b/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py
@@ -1598,12 +1598,14 @@ def configure_quantization_ops(self, config_file: str, default_output_bw: int, d
return QuantSimConfigurator(self.model, self.connected_graph, config_file, default_output_bw,
default_param_bw, default_data_type)
- def load_and_freeze_encodings(self, encoding_path: str):
+ def load_and_freeze_encodings(self, encoding_path: str, ignore_when_quantizer_disabled: bool = False):
"""
Functionality to set encodings (both activation and parameter) as per the given encodings JSON file and
freeze them.
:param encoding_path: JSON file path from where to load the encodings.
+ :param ignore_when_quantizer_disabled: ignore raising RuntimeError while setting encodings,
+ when quantizers are disabled.
"""
with open(encoding_path, mode='r') as json_file:
encodings_dict = json.load(json_file)
@@ -1616,7 +1618,7 @@ def load_and_freeze_encodings(self, encoding_path: str):
module.set_param_encoding(name, params_encoding)
module.freeze_param_encoding(name, params_encoding)
- module.set_activation_encoding(name, activation_encoding)
+ module.set_activation_encoding(name, activation_encoding, ignore_when_quantizer_disabled=ignore_when_quantizer_disabled)
module.freeze_activation_encoding(name, activation_encoding)
def set_and_freeze_param_encodings(self, encoding_path: str):
| Optionally ignore AssertionError while setting activation_encodings in load_and_freeze_encodings API
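A sketch of how the requested flag would be used once added; the model, dummy input and encodings path are placeholders, and only the keyword argument comes from the patch above.

```python
import torch
from aimet_torch.quantsim import QuantizationSimModel

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
dummy_input = torch.randn(1, 3, 224, 224)

sim = QuantizationSimModel(model, dummy_input=dummy_input)

# Encodings that belong to disabled quantizers are skipped with a log message
# instead of raising RuntimeError when the flag is set.
sim.load_and_freeze_encodings("encodings.json",
                              ignore_when_quantizer_disabled=True)
```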
| 2024-02-13T09:08:28 | 0.0 | [] | [] |
|||
gbouvignies/ChemEx | gbouvignies__ChemEx-245 | 0cef75cda2da9d52700cdd2ed106f1249ddd9320 | diff --git a/chemex/parameters/spin_system/atom.py b/chemex/parameters/spin_system/atom.py
index 9649577c..e47da978 100644
--- a/chemex/parameters/spin_system/atom.py
+++ b/chemex/parameters/spin_system/atom.py
@@ -3,7 +3,9 @@
from __future__ import annotations
from collections.abc import Hashable
+from copy import deepcopy
from dataclasses import dataclass, field
+from typing import Self
from .constants import CORRECT_ATOM_NAME
from .nucleus import Nucleus, str2nucleus
@@ -69,3 +71,37 @@ def __str__(self) -> str:
str: The name of the atom.
"""
return self.name
+
+ def __deepcopy__(self, memo: dict[int, Self]) -> Self:
+ """Creates a deep copy of the Atom instance.
+
+ Args:
+ memo (dict[int, Self]): A dictionary of memoized objects.
+
+ Returns:
+ Self: A deep copy of the Atom instance.
+ """
+ if id(self) in memo:
+ return memo[id(self)]
+
+ # Create a new instance of Atom without calling __init__
+ cls = self.__class__
+ new_atom = cls.__new__(cls)
+
+ # Copy all attributes to the new instance
+ new_atom.name = deepcopy(self.name, memo)
+ new_atom.nucleus = deepcopy(self.nucleus, memo)
+
+ # Copy search_keys excluding self
+ new_search_keys = deepcopy(
+ {key for key in self.search_keys if key is not self}, memo
+ )
+ new_atom.search_keys = new_search_keys
+
+ # Add the new_atom to its own search_keys set
+ new_atom.search_keys.add(new_atom)
+
+ # Memoize the new instance
+ memo[id(self)] = new_atom
+
+ return new_atom
diff --git a/chemex/parameters/spin_system/group.py b/chemex/parameters/spin_system/group.py
index e61a49fc..8c6630fc 100644
--- a/chemex/parameters/spin_system/group.py
+++ b/chemex/parameters/spin_system/group.py
@@ -1,8 +1,10 @@
from __future__ import annotations
from collections.abc import Hashable
+from copy import deepcopy
from functools import cache, total_ordering
from re import search
+from typing import Self
from .constants import AAA_TO_A
@@ -113,7 +115,7 @@ def __hash__(self) -> int:
Returns:
int: The hash value of the group.
"""
- return hash(self.name)
+ return hash((self.symbol, self.number, self.suffix))
def __bool__(self) -> bool:
"""Boolean representation of the Group object.
@@ -130,3 +132,38 @@ def __str__(self) -> str:
str: The full name of the group.
"""
return self.name
+
+ def __deepcopy__(self, memo: dict[int, Self]) -> Self:
+ """Deep copy of the Group object.
+
+ Args:
+ memo (dict[int, object]): A dictionary for tracking copied objects.
+
+ Returns:
+ Group: A deep copy of the group.
+ """
+ if id(self) in memo:
+ return memo[id(self)]
+
+ # Create a new instance of Group without calling __init__
+ cls = self.__class__
+ new_group = cls.__new__(cls)
+
+ # Deep copy all attributes to the new instance
+ new_group.symbol = deepcopy(self.symbol, memo)
+ new_group.number = deepcopy(self.number, memo)
+ new_group.suffix = deepcopy(self.suffix, memo)
+
+ # Copy search_keys excluding self
+ new_search_keys = deepcopy(
+ {key for key in self.search_keys if key is not self}, memo
+ )
+ new_group.search_keys = new_search_keys
+
+ # Add the new_group to its own search_keys set
+ new_group.search_keys.add(new_group)
+
+ # Memoize the new instance
+ memo[id(self)] = new_group
+
+ return new_group
| Regular expressions
It would be useful to have the ability to use regular expressions in the parameter and methods files. For instance, instead of saying:
dw,I019HD = fix
dw,I111HD = fix
dw,I213HD = fix
...
using
dw,I.+HD = fix
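To illustrate the kind of matching being asked for, here is the same pattern evaluated with Python's `re` module (the non-matching name is made up; the parameter-file syntax itself is what the request is about):

```python
import re

names = ["I019HD", "I111HD", "I213HD", "A045N"]
pattern = re.compile(r"I.+HD")
print([name for name in names if pattern.fullmatch(name)])
# ['I019HD', 'I111HD', 'I213HD']
```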
Fitting errors in Chemex v2024.05.2 with MC and BS syntaxes
I got the following errors when fitting CPMG relaxation dispersion data using Chemex v2024.05.2.
Error 1:
I wanted to run Chemex fitting using the following command, but I have an error in STEP2. Please note that there was no issue when I ran chemex fitting without MC and BS syntaxes, for example, by removing STEP2 from the following method file.
chemex fit -e Experiments\600mhz.toml Experiments\800mhz.toml -m Method\method.toml -p Parameters\parameters.toml -o Output
The method.toml (below) was prepared using the instructions described on your GitHub page.
https://github.com/gbouvignies/ChemEx/blob/main/website/docs/user_guide/fitting/method_files.md
https://github.com/search?q=repo%3Agbouvignies%2FChemEx%20mc&type=code
Error 2:
Not sure why "--bs" or "--mc" commands are considered as unrecognized arguments in the latest version of Chemex. Please find the errors below. Please note that "--mc 10" gives a similar error.
chemex fit -e Experiments\600mhz.toml Experiments\800mhz.toml -p Parameters\parameters.toml --bs 10 -o Output
chemex: error: unrecognized arguments: --bs 10
| This is definitely much needed, not trivial to implement though. I'll give it some thought.
Got it to work in the next (not yet available) version...
Is this in 0.6.1, or just point_to_profile?
This is just point_to_profile...
Your comment makes me realize that the current solution does not exactly allow what you asked in your first comment.
Now, what can be done is selecting specific nuclei. For instance:
dw_ab, NUC->HD = fix
But it is not possible to select 'HD' nuclei of all isoleucine residues, as in your example. Something to work on.
As mentioned in the previous comment, it is possible to select parameters depending on the same atom type (e.g. "NUC->H") or nucleus site (e.g. "NUC->HD"). I don't plan on implementing anything further than that (like selecting based on the residue type).
I'm closing the issue, but feel free to re-open it if you feel like it is an important feature to have.
Well, for error 2, the short answer is that syntax isn't supported anymore. Statistics should be specified in the method as detailed in the other file you linked to.
That said, I'm not sure why that one isn't working. What error are you getting in "Error 1"? | 2024-06-13T19:47:50 | 0.0 | [] | [] |
||
esphome/aioesphomeapi | esphome__aioesphomeapi-401 | 9439cc4fcde023a0e73a2b11e1379558f5ff53b4 | diff --git a/aioesphomeapi/connection.py b/aioesphomeapi/connection.py
index 2aa8a43a..f9c0c9ac 100644
--- a/aioesphomeapi/connection.py
+++ b/aioesphomeapi/connection.py
@@ -51,7 +51,7 @@
BUFFER_SIZE = 1024 * 1024 # Set buffer limit to 1MB
-INTERNAL_MESSAGE_TYPES = {GetTimeRequest, PingRequest, PingResponse, DisconnectRequest}
+INTERNAL_MESSAGE_TYPES = {GetTimeRequest, PingRequest, DisconnectRequest}
PING_REQUEST_MESSAGE = PingRequest()
PING_RESPONSE_MESSAGE = PingResponse()
@@ -679,6 +679,11 @@ def _process_packet(self, pkt: Packet) -> None:
_LOGGER.debug("%s: Got message of type %s: %s", self.log_name, msg_type, msg)
+ if self._pong_timer:
+ # Any valid message from the remote cancels the pong timer
+ # as we know the connection is still alive
+ self._async_cancel_pong_timer()
+
for handler in self._message_handlers.get(msg_type, [])[:]:
handler(msg)
@@ -687,11 +692,7 @@ def _process_packet(self, pkt: Packet) -> None:
if msg_type not in INTERNAL_MESSAGE_TYPES:
return
- if msg_type is PingResponse:
- # We got a pong so we know the ESP is alive, cancel the timer
- # that will disconnect us
- self._async_cancel_pong_timer()
- elif msg_type is DisconnectRequest:
+ if msg_type is DisconnectRequest:
self.send_message(DisconnectResponse())
self._connection_state = ConnectionState.CLOSED
self._expected_disconnect = True
| Any message received from remote should cancel the pong timer
https://github.com/esphome/aioesphomeapi/blob/9439cc4fcde023a0e73a2b11e1379558f5ff53b4/aioesphomeapi/connection.py#L693
It doesn't matter which message type we get back. Any valid protobuf should cancel the disconnect timer.
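The gist of the change as a generic asyncio sketch; this illustrates the pattern only and is not the aioesphomeapi implementation:

```python
import asyncio


class Connection:
    def __init__(self, loop: asyncio.AbstractEventLoop, timeout: float = 20.0) -> None:
        self._loop = loop
        self._timeout = timeout
        self._pong_timer: asyncio.TimerHandle | None = None

    def arm_pong_timer(self) -> None:
        # Disconnect if nothing arrives before the deadline.
        self._pong_timer = self._loop.call_later(self._timeout, self._on_timeout)

    def _on_timeout(self) -> None:
        print("no traffic before the deadline; closing connection")

    def process_packet(self, msg: object) -> None:
        # Any decoded message proves the peer is alive, so cancel the timer
        # here rather than only when a PingResponse arrives.
        if self._pong_timer is not None:
            self._pong_timer.cancel()
            self._pong_timer = None
        # ... dispatch msg to the registered handlers ...
```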
| 2023-03-08T21:23:44 | 0.0 | [] | [] |
|||
antarctica/GeoPlot | antarctica__GeoPlot-44 | e91fbd59eee4e033207d49a3fdcde04143adf34d | diff --git a/bas_geoplot/interactive.py b/bas_geoplot/interactive.py
index 705dc26..e080a11 100644
--- a/bas_geoplot/interactive.py
+++ b/bas_geoplot/interactive.py
@@ -209,7 +209,7 @@ def show(self):
"""
self._add_plots_map()
- folium.LayerControl(collapsed=True).add_to(self.map)
+ folium.LayerControl('topleft', collapsed=True).add_to(self.map)
return self.map
def save(self,file):
| Enabling/Disabling layers shouldn't move interface
When enabling or disabling a layer (e.g. SIC), the legend appears at the top left, which pushes the whole interface down. This makes it really annoying to toggle one layer on and off.
Would be good if we could move the colourmap legend to the top left instead (or bottom right/left, just out of the way).
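For reference, folium's `LayerControl` accepts a position argument, which is exactly what the one-line patch above passes (the map setup here is illustrative):

```python
import folium

m = folium.Map(location=[-75.0, 0.0], zoom_start=3)
# Keeping the control at 'topleft' stops later legends from pushing it around.
folium.LayerControl('topleft', collapsed=True).add_to(m)
m.save("map.html")
```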
| This is a relatively simple fix, we can just move the layer control to the top left where it won't be affected by the number of legends as below:

| 2023-03-30T13:44:17 | 0.0 | [] | [] |
||
XingangPan/DragGAN | XingangPan__DragGAN-91 | 651edb9428227aa0993487c3763b14948885fd29 | diff --git a/viz/drag_widget.py b/viz/drag_widget.py
index 912b378..aa0c454 100644
--- a/viz/drag_widget.py
+++ b/viz/drag_widget.py
@@ -90,6 +90,7 @@ def load_points(self, suffix):
@imgui_utils.scoped_by_object_id
def __call__(self, show=True):
viz = self.viz
+ reset = False
if show:
with imgui_utils.grayed_out(self.disabled_time != 0):
imgui.text('Drag')
| UnboundLocalError: local variable 'reset' referenced before assignment
In the GUI, if you try to minimize the "Drag" button, it will return UnboundLocalError. To reproduce just run the gui.sh script and try to minimize the "Drag" button.
```
py:146: UserWarning: A NumPy version >=1.16.5 and <1.23.0 is required for this version of SciPy (detected version 1.24.3
warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
Loading "/home/belal/personal/DragGAN/checkpoints/stylegan2_lions_512_pytorch.pkl"... Done.
()
{'z_dim': 512, 'c_dim': 0, 'w_dim': 512, 'img_resolution': 512, 'img_channels': 3, 'mapping_kwargs': {'num_layers': 8, 'embed_features': None, 'layer_features': None, 'activation': 'lrelu', 'lr_multiplier': 0.01, 'w_avg_beta': 0.995}, 'synthesis_kwargs': {'channel_base': 32768, 'channel_max': 512, 'num_fp16_res': 0, 'conv_clamp': None, 'architecture': 'skip', 'resample_filter': [1, 3, 3, 1], 'use_noise': True, 'activation': 'lrelu'}}
Setting up PyTorch plugin "bias_act_plugin"... Done.
Setting up PyTorch plugin "upfirdn2d_plugin"... Done.
Traceback (most recent call last):
File "/home/belal/personal/DragGAN/visualizer_drag.py", line 402, in <module>
main()
File "/home/belal/anaconda3/envs/stylegan3/lib/python3.9/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/home/belal/anaconda3/envs/stylegan3/lib/python3.9/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/home/belal/anaconda3/envs/stylegan3/lib/python3.9/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/belal/anaconda3/envs/stylegan3/lib/python3.9/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/home/belal/personal/DragGAN/visualizer_drag.py", line 396, in main
viz.draw_frame()
File "/home/belal/personal/DragGAN/visualizer_drag.py", line 158, in draw_frame
self.drag_widget(expanded)
File "/home/belal/personal/DragGAN/gui_utils/imgui_utils.py", line 83, in decorator
res = method(self, *args, **kwargs)
File "/home/belal/personal/DragGAN/viz/drag_widget.py", line 164, in __call__
viz.args.reset = reset
UnboundLocalError: local variable 'reset' referenced before assignment
```
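The failure in the traceback above is the classic pattern sketched below; the one-line patch simply gives `reset` a value before the conditional (this is a minimal reproduction, not the DragGAN widget code):

```python
def compute_reset_flag() -> bool:
    # Stand-in for the widget logic that only runs while the pane is expanded.
    return True


def widget(show: bool = True) -> bool:
    reset = False              # the fix: define the name on every code path
    if show:
        reset = compute_reset_flag()
    return reset               # used to raise UnboundLocalError when show=False


print(widget(show=False))      # prints False instead of raising
```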
| The same error | 2023-06-27T12:28:40 | 0.0 | [] | [] |
||
qentinelqi/qweb | qentinelqi__qweb-67 | 7a648b6addabbd96167dd8b80a454a66747db831 | diff --git a/QWeb/keywords/debug.py b/QWeb/keywords/debug.py
index 11847e8a..f6a94304 100644
--- a/QWeb/keywords/debug.py
+++ b/QWeb/keywords/debug.py
@@ -56,7 +56,7 @@ def debug_on(mode='draw'):
CONFIG.set_value('DefaultTimeout', 2)
CONFIG.set_value('Debug_Run', True)
dbg.debug()
-
+ CONFIG.set_value('DefaultTimeout', cur_timeout)
@keyword(tags=("Debug", "Error handling"))
def debug_off():
| DefaultTimeout is left to 2s after exiting debug mode
I use DebugOn keyword to troubleshoot test cases. Debug mode sets the default timeout to 2s but when I exit debug mode with ctrl-d, timeout is not reverted back to original. This causes tests to fail.
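The fix amounts to saving the timeout before the debug session and restoring it afterwards. A generic sketch of that pattern follows; the config object and debugger stand in for QWeb's internals, and the try/finally is an extra safeguard rather than part of the actual patch:

```python
def debug_on(config, debugger, debug_timeout=2):
    original_timeout = config.get_value('DefaultTimeout')
    config.set_value('DefaultTimeout', debug_timeout)
    try:
        debugger.debug()   # interactive session, exited with Ctrl-D
    finally:
        # Restore the user's timeout so later test cases are unaffected.
        config.set_value('DefaultTimeout', original_timeout)
```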
| 2022-03-16T10:26:57 | 0.0 | [] | [] |
|||
georgy7/catframes | georgy7__catframes-41 | 341a340a59ae0584ef83969b9411a323df70d6ce | diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..1af045c
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,127 @@
+# Catframes Changelog
+
+## [2024.8.0] - 2024-08-31
+### Added
+- GUI application written in Tkinter. You can run it like this: `catmanager`
+- System option `--live-preview` (GUI related).
+
+### Changed
+- The `WARN` overlay becomes optional. These warnings embedded in the video stream
+ are quite useless for casual users who run the script manually, and whose data
+ is not related to CCTV.
+
+### Fixed
+- We have eliminated network-related delays on some platforms (mostly Windows) by switching
+ to pipes. Now the script writes uncompressed stream of pixels to the FFmpeg standard input.
+ Thus, on some systems, the execution was accelerated three times.
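A minimal sketch of the pipe-based approach this entry describes: raw RGB frames are written straight to FFmpeg's stdin, so nothing touches the disk (illustrative only; the real script assembles its FFmpeg command differently):

```python
import subprocess

WIDTH, HEIGHT, FPS = 1280, 720, 30


def render_frames(count: int = 90):
    """Stand-in frame source: solid gray frames as raw RGB bytes."""
    frame = bytes([128]) * (WIDTH * HEIGHT * 3)
    for _ in range(count):
        yield frame


ffmpeg = subprocess.Popen(
    [
        "ffmpeg", "-y",
        "-f", "rawvideo", "-pix_fmt", "rgb24",
        "-s", f"{WIDTH}x{HEIGHT}", "-r", str(FPS),
        "-i", "-",                    # uncompressed pixels from stdin
        "-c:v", "libx264", "-pix_fmt", "yuv420p", "output.mp4",
    ],
    stdin=subprocess.PIPE,
)
for frame in render_frames():
    ffmpeg.stdin.write(frame)
ffmpeg.stdin.close()
ffmpeg.wait()
```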
+
+### Removed
+- Web server. Firewall warnings will no longer appear.
+
+### Deprecated
+- System option `--port-range`: it does not affect anything.
+
+
+## [2024.4.0] - 2024-05-04
+Some things in the source code have been renamed,
+and some comments have been translated into English.
+
+
+## [2024.3.1] - 2024-04-03
+### Fixed
+- WSGIServer shutdown
+
+
+## [2024.3.0] - 2024-03-31
+### Changed
+- I preload frames in a separate thread. It's about 15 percent faster.
+- Using other VP9 options results in slightly larger files, but 1.5-2 times faster.
+
+
+## [2024.2.0] - 2024-02-07
+### Changed
+- Destination file name is allowed (and ignored) with option `--resolutions`
+- No subsampling in high quality mode; moderate subsampling in medium quality mode
+
+### Added
+- Support for QOI and PCX input image formats
+
+### Removed
+- Options `--trim-start` and `--trim-end`
+
+
+## [2023.9.1] - 2023-10-19
+### Fixed
+- A bug in the resolution resolving algorithm, leading to division by zero in the presence
+ of odd frame sizes due to incorrect use of rounded (to even) size.
+
+
+## [2023.9.0] - 2023-09-24
+The version numbers now almost match the SemVer recommendations.
+
+### Changed
+- Video resolution resolving algorithm copes better with controversial situations.
+
+### Added
+- The ability to set the range of allowed ports (`-p min:max`).
+- The ability to make videos even from empty or non-existent folders (`--sure`).
+
+### Deprecated
+- Options `--trim-start` and `--trim-end`
+
+
+## [2023.5] - 2023-05-19
+Not published in PYPI.
+
+The script works in Python 3.7 again.
+
+
+## [2022.10] - 2022-10-09
+Not published in PYPI.
+
+Complete re-implementation that uses a web server to pass frames to FFmpeg.
+Thus, intermediate results of image processing are not saved to the disk, which reduces its wear.
+
+### Added
+- The ability to compress images not from the current folder.
+- The ability to concatenate multiple folders into one video.
+- A DSL named OverLang to mark up overlays.
+- Result guarantee: the video will be made under any circumstances!
+- Warning screens (refers to the previous item). They are embedded in the video stream.
+
+### Changed
+- It sorts files in natural order (in each folder separately).
+- The text color and the background color of the text dynamically adjust to the video
+ (the solid background fill is designed to make the text readable against a non-uniform background).
+- Videos have black margins (black background) by default.
+
+
+## Early history
+[The first prototypes] developed between 2018 and 2020 used FFmpeg with [ImageMagick].
+
+They had the following problems:
+
+1. The program used disk to process the frames. It was slow on HDD and wore out SSD.
+2. It processed the frames in-place. If something went wrong, the program could simply spoil the original data.
+3. It chose the most frequent resolution, which, under special circumstances, could lower the quality of most frames.
+ I added an improved resolution selection method based on the weighted average only in September 2020.
+4. Its scaling heuristic was quite confusing and stupid.
+5. Lexicographic file sorting is usually not what you expect.
+6. Poor default settings (acid colors, one frame per second).
+
+There was also an option to remove the original data after processing.
+It makes sense, probably, when compressing CCTV frames.
+
+
+
+[ImageMagick]: https://imagemagick.org/
+[The first prototypes]: https://github.com/georgy7/catframes/tree/e65eb40a6d98b72a9d6609c057254a7ede3a0959
+[2022.10]: https://github.com/georgy7/catframes/tree/b919b07d7e2944aaab79181c4312aba083ffd1d9
+[2023.5]: https://github.com/georgy7/catframes/tree/008297abe6e821f0aeda6a327ae8c15220995402
+[2023.9.0]: https://github.com/georgy7/catframes/tree/archive/dev_2023_09
+[2023.9.1]: https://github.com/georgy7/catframes/tree/v2023.9.1
+[2024.2.0]: https://github.com/georgy7/catframes/tree/v2024.2.0
+[2024.3.0]: https://github.com/georgy7/catframes/tree/v2024.3.0
+[2024.3.1]: https://github.com/georgy7/catframes/tree/v2024.3.1
+[2024.4.0]: https://github.com/georgy7/catframes/tree/v2024.4.0
+[2024.8.0]: https://github.com/georgy7/catframes/tree/v2024.8.0
diff --git a/LICENSE.txt b/LICENSE.txt
index 485d802..4df391a 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,5 +1,6 @@
-© УÑÑинов Ð.Ð., 2022â2024
-© СÑÑоваÑÑкий Ð.Ð., 2024
+© ÐеоÑгий УÑÑинов, 2022â2024
+© Ðогдан СÑÑоваÑÑкий, 2024
+© Ðвгений ÐкаÑÑев, 2024
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
diff --git a/README.md b/README.md
index c106b49..28de94d 100644
--- a/README.md
+++ b/README.md
@@ -1,117 +1,77 @@
Catframes
=========
-A script that concatenates frames. FFmpeg wrapper.
+It concatenates frames.
-With this software, you can, for example,
+With this software, you can
-* create a timelapse video,
-* turn an animation rendered in a PNG sequence into a video,
-* or compress a sequence of frames from a surveillance camera using a video codec.
+* create a timelapse video
+* turn an animation rendered in a PNG sequence into a video
+* compress your selfies
+* compress a sequence of frames from CCTV
-The script simply accepts folders with JPEG and PNG files as input and outputs MP4 or WebM.
+The script takes folders with images and outputs MP4 or WebM.
-Installation
-------------
-
-### From PyPI
-
-I recommend to do it as root.
-
-```
-python3 -m pip install catframes
-```
-
-Installing dependencies:
-
-Alpine: `apk add ffmpeg font-dejavu`
-
-Debian/Ubuntu: `apt-get install ffmpeg fonts-dejavu`
-
-Centos/RHEL: `yum install ffmpeg dejavu-sans-mono-fonts`
+It has GUI: `catmanager`
-Windows: [Get FFmpeg Windows builds](https://ffmpeg.org/download.html)
+What exactly does it do
+-----------------------
-### From source (Linux, Unix-like)
-
-Catframes is a script. Everything, including tests,
-is contained in a single file that you can upload
-to a system yourself.
-
-```
-cp catframes.py /usr/local/bin/
-chmod 755 /usr/local/bin/catframes.py
-ln -s /usr/local/bin/catframes.py /usr/local/bin/catframes
-```
-
-Installing dependencies: exactly the same, except for [Pillow](https://python-pillow.org/).
+1. It takes the folders in the order you specify them.
+2. It sorts images by name in natural order in each folder separately.
+3. It checks the resolution of each image and counts their numbers.
+4. It heuristically chooses the video resolution from the collected data.
+5. It resizes each image (on the fly) to fit into that resolution, preserving the aspect ratio and aligning the images in the center.
+6. It overlays various text information on the frames, if you specified this in the arguments.
+7. It concatenates the frames, having a fairly high resistance to emergencies.
-If you don't want to use pip, the library usually can be installed from operating system repository.
-Alpine: `apk add py3-pillow`
-
-Debian/Ubuntu: `apt-get install python3-pil`
-
-Centos/RHEL: `yum install python3-pillow`
+Installation
+------------
-It is better to run tests as a regular user.
+Do this as root for system-wide installation:
```
-python3 -m unittest discover /usr/local/bin/ -p catframes.py
+python3 -m pip install catframes
```
+You can also copy `catframes.py` to `/usr/local/bin` manually.
+But then you will also need to install [Pillow](https://pypi.org/project/Pillow/#files).
-### From source (Windows)
+Dependencies that are not installable from PYPI:
-0. You need Python3 to be available in `cmd` as `python3`.
-1. Copy both `catframes.py` and `catframes.bat` to a folder (for instance, `C:\Program Files\MyScripts`).
-2. Add this folder to `PATH` environment variable.
-3. Install [FFmpeg](https://ffmpeg.org/download.html).
-4. Add the folder, where `ffmpeg.exe` is installed, to the `PATH` environment variable.
-5. Install Pillow.
+1. FFmpeg
+2. Monospaced fonts
-You don't have to install fonts, because Windows already has Courier New.
-
-You may install Pillow, using `pip`.
-
-If you don't use `pip` for some reason, you may
-[download Pillow `*.whl` file](https://pypi.org/project/Pillow/#files),
-corresponding to your Python version, unpack it and put `PIL`
-in your Python interpreter directory.
-This usually works.
-Files with `whl` extension are ordinary ZIP archives.
-
-If everything is done, the following commands will work in any folder.
-
-```
-ffmpeg -version
+Alpine: `apk add ffmpeg font-dejavu`
-catframes --help
-```
+Debian/Ubuntu: `apt-get install ffmpeg fonts-dejavu`
-You may run unit tests with the following line:
+Centos/RHEL: `yum install ffmpeg dejavu-sans-mono-fonts`
-```
-python3 -m unittest discover "C:\Program Files\MyScripts" -p catframes.py
-```
+Windows: [FFmpeg builds](https://ffmpeg.org/download.html); Courier New included.
Usage
-----
-Video encoding is a long process. If you are launching the program for the first time,
-a good way to try different options is to use `--limit` to make short video samples.
+If you are launching the program for the first time,
+use `--limit` option to try different options on short video samples.
+
+ catframes --limit=3 sourceFolder sample.mp4
The command to run it with default settings looks like this:
catframes folderA folderB folderC result.webm
-For automatic launches (through a CRON job, etc.), I recommend to add these options:
+For automatic launches (through a CRON job, etc.), it's better to add `--force` and `--sure` options:
- catframes -sf --port-range=10140:10240 folderA folderB folderC result.webm
+ catframes -sf folderA folderB folderC result.webm
-### Default settings
+
+Default settings
+----------------
**Frame rate:** 30 frames per second.
@@ -130,13 +90,15 @@ Acceptable values are `poor`, `medium`, `high`.
You may change it with `--margin-color`.
It takes values in formats `#rrggbb` and `#rgb` (hexadecimal digits; `#abc` means `#aabbcc`).
-### Text overlays
+
+Text overlays
+-------------
There is a 3 by 3 grid. You can place labels in all cells except the central one.
Please, use `--left`, `--right`, `--top`, `--left-top`, etc.
-**Important:** One of the cells must be reserved for important warnings.
+One of the cells may be reserved for important warnings.
It is set by `WARN` value (in any case). By default, this is the top cell.
You can use any constant text in the labels, including line feeds (`\n`).
@@ -145,7 +107,7 @@ information about the source image or about the system.
To prevent special characters from being interpreted, you should use
*single quotes in Unix Shell*, however,
-you **must** use double quotes in the Windows command line.
+you must use double quotes in the Windows command line.
Example (Bash):
@@ -164,4 +126,4 @@ Read more about these functions in the docs.
See also
--------
-**[Documentation (in Russian)](https://itustinov.ru/cona/latest/docs/html/catframes.html)**
+**[Documentation](https://itustinov.ru/cona/latest/docs/html/catframes.html)** (in Russian, outdated)
diff --git a/pyproject.toml b/pyproject.toml
index 4c685af..1c8190a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,8 @@
[project]
name = "catframes"
-version = "2024.8.0-SNAPSHOT"
+version = "2024.8.0"
-description = "A handy tool for combining images into videos via FFmpeg."
+description = "Video glue for image folders."
readme = "README.md"
license = {file = "LICENSE.txt"}
@@ -26,9 +26,9 @@ dependencies = [
]
[project.urls]
-"Docs" = "http://itustinov.ru/cona/latest/docs/html/catframes.html"
+Homepage = "https://itustinov.ru/"
"Source code" = "https://github.com/georgy7/catframes"
-"Alternative repository" = "https://gitflic.ru/project/georgy7/cona"
+Changelog = "https://github.com/georgy7/catframes/blob/main/CHANGELOG.md"
[project.scripts]
catframes = "catframes.catframes:main"
diff --git a/src/catframes/catframes.py b/src/catframes/catframes.py
index df2b03b..947bb34 100644
--- a/src/catframes/catframes.py
+++ b/src/catframes/catframes.py
@@ -74,7 +74,7 @@
from PIL import Image, ImageColor, ImageDraw, ImageFont
-__version__ = '2024.8.0-SNAPSHOT'
+__version__ = '2024.8.0'
__license__ = 'Zlib'
@@ -360,13 +360,13 @@ def test_list_images_2(self):
def test_list_images_of_non_existent_folder(self):
with tempfile.TemporaryDirectory() as folder_path_string:
fake_path = Path(folder_path_string) / 'fake'
- with self.assertRaisesRegex(ValueError, r'\S+'):
+ with self.assertRaisesRegex(ValueError, '\S+'):
FileUtils.list_images(fake_path)
def test_list_images_of_non_existent_parent(self):
with tempfile.TemporaryDirectory() as folder_path_string:
fake_path = Path(folder_path_string) / 'fake_parent' / 'a_folder'
- with self.assertRaisesRegex(ValueError, r'\S+'):
+ with self.assertRaisesRegex(ValueError, '\S+'):
FileUtils.list_images(fake_path)
def test_list_images_of_forbidden_folder(self):
@@ -1300,162 +1300,221 @@ def apply(self, frame: Frame) -> bytes:
class Quality(Enum):
- """Abstraction over endless FFmpeg quality settings."""
+ """ÐбÑÑÑакÑÐ¸Ñ Ð½Ð°Ð´ беÑконеÑнÑми наÑÑÑойками каÑеÑÑва FFmpeg."""
- HIGH = 1
- """Very high, but still lossÑ. It is suitable for artistic timelaps,
- where it is important to preserve the texture, light iridescence,
- and grain of the camera. It has a bitrate like MJPEG with 75% quality.
+ HIGH = 'HIGH'
+ """ÐÑÐµÐ½Ñ Ð²ÑÑокое, но вÑÑ Ð¶Ðµ Ñ Ð¿Ð¾ÑеÑÑми. ÐодÑ
Ð¾Ð´Ð¸Ñ Ð´Ð»Ñ Ñ
ÑдожеÑÑвеннÑÑ
ÑаймлапÑов, где важно
+ ÑоÑ
ÑаниÑÑ ÑекÑÑÑÑÑ, ÑвеÑовÑе пеÑеливÑ, зеÑниÑÑоÑÑÑ ÐºÐ°Ð¼ÐµÑÑ. ÐиÑÑÐµÐ¹Ñ â как Ñ JPEG 75.
"""
- MEDIUM = 2
- """It is suitable for almost any task. The graininess of the video
- disappears, the gradients become a little rougher, the picture may be
- a little murkier, but the details are easily recognizable.
+ MEDIUM = 'MEDIUM'
+ """ÐодойдÑÑ Ð¿Ð¾ÑÑи Ð´Ð»Ñ Ð»ÑбÑÑ
задаÑ. ÐеÑниÑÑоÑÑÑ Ð²Ð¸Ð´ÐµÐ¾ пÑопадаеÑ, гÑадиенÑÑ ÑÑановÑÑÑÑ ÑÑÑÑ
+ гÑÑбее, каÑÑинка Ð¼Ð¾Ð¶ÐµÑ Ð±ÑÑÑ ÑÑÑÑ Ð¼ÑÑнее, но деÑали легко ÑзнаваемÑ.
"""
- POOR = 3
- """Some small details become indistinguishable."""
-
-
-class Encoder(ABC):
- """FFmpeg options with a certain level of quality."""
- def fits(self) -> bool:
- """Can be used on this computer"""
- def get_options(self) -> Sequence[str]:
- """Encoding options"""
-
-
-class X264Encoder(Encoder):
- def __init__(self, quality: Quality):
-
- if Quality.HIGH == quality:
- self.preset = 'slow'
- self.crf = '6'
- elif Quality.MEDIUM == quality:
- self.preset = 'slow'
- self.crf = '14'
- else:
- self.preset = 'fast'
- self.crf = '22'
-
- def fits(self) -> bool:
- return True
-
- def get_options(self) -> Sequence[str]:
+ POOR = 'POOR'
+ """ÐекоÑоÑÑе мелкие деÑали ÑÑановÑÑÑÑ Ð½ÐµÑазлиÑимÑми."""
+
+
+class Encoder:
+ def get_options(self, quality: Quality, fps: int = None) -> Sequence[str]:
+ raise NotImplementedError("Subclasses should implement this method")
+
+
+class H264Encoder(Encoder):
+ _settings = {
+ Quality.HIGH: {'crf': '1', 'pix_fmt': 'yuv444p'},
+ Quality.MEDIUM: {'crf': '12', 'pix_fmt': 'yuv422p'},
+ Quality.POOR: {'crf': '22', 'pix_fmt': 'yuv420p'}
+ }
+
+ def get_options(self, quality: Quality, fps: int) -> Sequence[str]:
+ settings = self._settings[quality]
+ crf = settings['crf']
+ pix_fmt = settings['pix_fmt']
+ if fps is not None:
+ crf = round(crf + 2.3 * math.log2(60 / fps))
return [
'-c:v', 'libx264',
- '-preset:v', self.preset,
- '-crf', self.crf,
- '-pix_fmt', 'yuv444p',
- '-tune', 'grain',
- '-vf', 'hqdn3d'
+ '-preset', 'fast',
+ '-tune', 'fastdecode',
+ '-movflags', '+faststart',
+ '-pix_fmt', pix_fmt,
+ '-crf', crf
]
-class NVENC_Encoder(Encoder):
- def __init__(self, quality: Quality):
-
- if Quality.HIGH == quality:
- self.preset = 'slow'
- self.profile = 'high'
- self.bitrate = '35'
- self.bufsize = '10000k'
- self.cq = '4'
-
- elif Quality.MEDIUM == quality:
- self.preset = 'slow'
- self.profile = 'high'
- self.bitrate = '20'
- self.bufsize = '5000k'
- self.cq = '12'
- else:
- self.preset = 'fast'
- self.profile = 'main'
- self.bitrate = '8'
- self.bufsize = '1000k'
- self.cq = '20'
-
- self.max_bitrate = f'{str(int(self.bitrate) + 4)}M'
- self.bitrate += "M"
-
- def fits(self) -> bool:
- return True
+class VP9Encoder(Encoder):
+ _settings = {
+ Quality.HIGH: {'crf': '3', 'pix_fmt': 'yuv444p'},
+ Quality.MEDIUM: {'crf': '14', 'pix_fmt': 'yuv422p'},
+ Quality.POOR: {'crf': '31', 'pix_fmt': 'yuv420p'}
+ }
- def get_options(self) -> Sequence[str]:
+ def get_options(self, quality: Quality, fps: int = None) -> Sequence[str]:
+ settings = self._settings[quality]
+ crf = settings['crf']
+ pix_fmt = settings['pix_fmt']
+ return [
+ '-c:v', 'libvpx-vp9',
+ '-deadline', 'realtime',
+ '-cpu-used', '4',
+ '-pix_fmt', pix_fmt,
+ '-crf', crf,
+ '-b:v', '0'
+ ]
+
+
+class H264NvencEncoder(Encoder):
+ _settings = {
+ Quality.HIGH: {'qp': '7', 'pix_fmt': 'yuv444p'},
+ Quality.MEDIUM: {'qp': '14', 'pix_fmt': 'yuv422p'},
+ Quality.POOR: {'qp': '28', 'pix_fmt': 'yuv420p'}
+ }
+
+ def get_options(self, quality: Quality, fps: int = None) -> Sequence[str]:
+ settings = self._settings[quality]
+ qp = settings['qp']
+ pix_fmt = settings['pix_fmt']
return [
- '-c:v', 'h264_nvenc',
- '-preset:v', self.preset,
- '-profile:v', self.profile,
- '-cq', self.cq,
- '-rc', 'cbr',
- '-b:v', self.bitrate,
- '-maxrate', self.max_bitrate,
- '-bufsize', self.bufsize,
- '-bf', '3',
- '-pix_fmt', 'yuv444p',
- '-rc-lookahead', '25',
- '-vf', 'hqdn3d'
+ '-c:v', 'h264_nvenc',
+ '-preset', 'fast',
+ '-movflags', '+faststart',
+ '-pix_fmt', pix_fmt,
+ '-qp', qp
+ ]
+
+
+class H264AmfEncoder(Encoder):
+ _settings = {
+ Quality.HIGH: {'qp_i': '7', 'qp_p': '9'},
+ Quality.MEDIUM: {'qp_i': '16', 'qp_p': '18'},
+ Quality.POOR: {'qp_i': '26', 'qp_p': '27'}
+ }
+
+ def get_options(self, quality: Quality, fps: int = None) -> Sequence[str]:
+ settings = self._settings[quality]
+ qp_i = settings['qp_i']
+ qp_p = settings['qp_p']
+ return [
+ '-c:v', 'h264_amf',
+ '-preset', 'speed',
+ '-movflags', '+faststart',
+ '-pix_fmt', 'yuv420p',
+ '-rc', 'cqp',
+ '-qp_i', qp_i,
+ '-qp_p', qp_p
+ ]
+
+
+class HevcNvencEncoder(Encoder):
+ _settings = {
+ Quality.HIGH: {'qp': '7', 'pix_fmt': 'yuv444p'},
+ Quality.MEDIUM: {'qp': '14', 'pix_fmt': 'yuv422p'},
+ Quality.POOR: {'qp': '28', 'pix_fmt': 'yuv420p'}
+ }
+
+ def get_options(self, quality: Quality, fps: int = None) -> Sequence[str]:
+ settings = self._settings[quality]
+ qp = settings['qp']
+ pix_fmt = settings['pix_fmt']
+ return [
+ '-c:v', 'hevc_nvenc',
+ '-preset', 'fast',
+ '-movflags', '+faststart',
+ '-pix_fmt', pix_fmt,
+ '-qp', qp
]
-class H264Amf(Encoder):
- """H264 encoder for AMD.
- It has exactly the same performance as x264 on my laptop
- with AMD A8-6410 APU (AMD Radeon R5 Graphics).
- However, it gives a cleaner result.
- The frame rate does not affect it.
- It does not support 4:4:4 pixel format, so in my opinion
- it is not suitable for high quality video.
- """
- def __init__(self, quality: Quality):
- if Quality.HIGH == quality:
- self.qp = -1
- elif Quality.MEDIUM == quality:
- self.qp = 18
- else:
- self.qp = 27
-
- def fits(self) -> bool:
- return (self.qp > 0)
+class HevcAmfEncoder(Encoder):
+ _settings = {
+ Quality.HIGH: {'qp_i': '7', 'qp_p': '9'},
+ Quality.MEDIUM: {'qp_i': '16', 'qp_p': '18'},
+ Quality.POOR: {'qp_i': '26', 'qp_p': '27'}
+ }
- def get_options(self) -> Sequence[str]:
+ def get_options(self, quality: Quality, fps: int = None) -> Sequence[str]:
+ settings = self._settings[quality]
+ qp_i = settings['qp_i']
+ qp_p = settings['qp_p']
return [
- '-pix_fmt', 'nv12',
- '-c:v', 'h264_amf',
- '-usage', 'lowlatency',
+ '-c:v', 'hevc_amf',
+ '-preset', 'speed',
+ '-movflags', '+faststart',
+ '-pix_fmt', 'yuv420p',
'-rc', 'cqp',
- '-qp_i', str(self.qp),
- '-qp_p', str(self.qp),
- '-qp_b', str(self.qp)
+ '-qp_i', qp_i,
+ '-qp_p', qp_p
]
-class VP8Encoder(Encoder):
- def __init__(self, quality: Quality):
+class FFmpegEncoderChecker:
+ def __init__(self):
+ self.hardware_encoders = {
+ 'h264_nvenc': H264NvencEncoder,
+ 'h264_amf': H264AmfEncoder,
+ 'hevc_nvenc': HevcNvencEncoder,
+ 'hevc_amf': HevcAmfEncoder,
+ }
+ #TODO h264_qsv, hevc_qsv, h264_v4l2m2m
+
+ self.software_encoders = {
+ 'libx264': H264Encoder,
+ 'libvpx': VP9Encoder
+ }
+
+ self.encoder_map = {
+ '.mp4': ['h264_nvenc', 'h264_amf', 'h264_qsv', 'h264_v4l2m2m', 'hevc_nvenc', 'hevc_amf', 'libx264'],
+ '.webm': ['libvpx']
+ }
+ """
+ Ð encoder_map наÑÑÑаиваеÑÑÑ Ð¿ÑиоÑиÑеÑноÑÑÑ ÑнкодеÑа.
+ ÐÑо пеÑвÑй доÑÑÑпнÑй и ÑабоÑий бÑдеÑ, ÑÐ¾Ñ Ð¸ бÑÐ´ÐµÑ Ð²ÑбÑан.
+ """
- if Quality.HIGH == quality:
- self.quality = 'best'
- self.bitrate = '15M'
- elif Quality.MEDIUM == quality:
- self.quality = 'good'
- self.bitrate = '8M'
- else:
- self.quality = 'good'
- self.bitrate = '4M'
+ def check_encoders(self):
+ """ÐозвÑаÑÐ°ÐµÑ ÑпиÑок акÑÑалÑнÑÑ
ÑнкодеÑов, коÑоÑÑе доÑÑÑÐ¿Ð½Ñ Ð² ffmpeg"""
+ try:
+ result = subprocess.run(['ffmpeg', '-encoders'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ encoders = result.stdout
+ except FileNotFoundError:
+ print("FFmpeg is not installed on this system.")
+ return []
+
+ available_encoders = []
+ for line in encoders.splitlines():
+ for encoder in {**self.hardware_encoders, **self.software_encoders}:
+ if encoder in line:
+ available_encoders.append(encoder)
+ break
+
+ return list(set(available_encoders))
- def fits(self) -> bool:
- return True
+ def test_encoder(self, encoder):
+ """
+ ÐÑÐ¸Ð½Ð¸Ð¼Ð°ÐµÑ ÐºÐ¾Ð´Ð¸ÑовÑик в каÑеÑÑве аÑгÑменÑа, генеÑиÑÑÐµÑ ÑеÑÑовÑй Ñаблон Ñ Ð¿Ð¾Ð¼Ð¾ÑÑÑ Ð¾Ð¿Ñии -f lavfi
+ и color=c=black:s=1280x720:d=1 (ÑеÑнÑй ÑкÑан ÑазмеÑом 1280x720 пикÑелей, длиÑелÑноÑÑÑ 1 ÑекÑнда),
+ Ð´Ð»Ñ ÐºÐ¾Ð´Ð¸ÑÐ¾Ð²Ð°Ð½Ð¸Ñ ÑеÑÑового Ñаблона Ñ Ð¸ÑполÑзованием Ñказанного кодиÑовÑика и оÑпÑавлÑем вÑвод в /dev/null
+ (или nul на Windows) Ñ Ð¿Ð¾Ð¼Ð¾ÑÑÑ Ð¾Ð¿Ñии -f null -
+ """
+ try:
+ subprocess.run(['ffmpeg', '-y', '-f', 'lavfi', '-i', 'color=c=black:s=1280x720:d=1', '-c:v', encoder, '-f', 'null', '-'], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ return True
+ except subprocess.CalledProcessError:
+ return False
- def get_options(self) -> Sequence[str]:
- return [
- '-c:v', 'libvpx',
- '-quality', self.quality,
- '-b:v', self.bitrate,
- '-lag-in-frames', '20',
- '-arnr-maxframes', '15',
- '-arnr-strength', '5'
- ]
+ def select_encoder(self, file_extension) -> Encoder:
+ """ÐозвÑаÑÐ°ÐµÑ ÑкземплÑÑ ÐºÐ»Ð°ÑÑа ÑнкодеÑа Ð´Ð»Ñ Ñказанного ÑÑÑÑикÑа"""
+ available_encoders = self.check_encoders()
+
+ for encoder in self.encoder_map[file_extension]:
+ if encoder in available_encoders and self.test_encoder(encoder):
+ encoder_class = {**self.hardware_encoders, **self.software_encoders}.get(encoder)
+ if encoder_class:
+ return encoder_class()
+
+ raise ValueError(f"No working encoders found for {file_extension}.")
@dataclass(frozen=True)
@@ -1494,9 +1553,13 @@ def limit_frames(self, frames: Sequence[Frame]):
class OutputProcessor:
def __init__(self, options: OutputOptions):
self._options = options
+ self._encoder_checker = FFmpegEncoderChecker()
self._exit_lock = threading.Lock()
self._write_pixels_control: Queue = Queue(maxsize = 10)
+ def _get_encoder_options(self, encoder: Encoder) -> Sequence[str]:
+ return encoder.get_options(self._options.quality, self._options.frame_rate)
+
def exit_threads(self):
"""To terminate all threads running in the main method in a controlled manner."""
with self._exit_lock:
@@ -1526,15 +1589,11 @@ def set_processed(count):
'-r', str(self._options.frame_rate),
'-i', '-'
]
-
+
suffix = self._options.destination.suffix
- if suffix == '.mp4':
- ffmpeg_options.extend(NVENC_Encoder(self._options.quality).get_options())
- elif suffix == '.webm':
- ffmpeg_options.extend(VP8Encoder(self._options.quality).get_options())
- else:
- raise ValueError('Unsupported file name suffix.')
-
+ selected_encoder = self._encoder_checker.select_encoder(suffix)
+
+ ffmpeg_options.extend(self._get_encoder_options(selected_encoder))
ffmpeg_options.extend([
'-r', str(self._options.frame_rate),
('-y' if self._options.overwrite else '-n'),
@@ -1550,7 +1609,6 @@ def set_processed(count):
catched = None
process = subprocess.Popen(ffmpeg_options,
- bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
 | Someday in the future, add an fps parameter.
To make videos where something happens very quickly.
And make the default, say, 4 fps.
| 2024-09-13T21:52:10 | 0.0 | [] | [] |
|||
tellor-io/telliot-feeds | tellor-io__telliot-feeds-430 | 87aba4dd34fa22904e1bbdf8c04f2efc38917ec1 | diff --git a/src/telliot_feeds/cli/commands/report.py b/src/telliot_feeds/cli/commands/report.py
index 0512888c..07fe0b9f 100644
--- a/src/telliot_feeds/cli/commands/report.py
+++ b/src/telliot_feeds/cli/commands/report.py
@@ -459,16 +459,11 @@ async def report(
**common_reporter_kwargs,
}
- if sig_acct_addr != "":
- reporter = FlashbotsReporter(
- signature_account=sig_account,
- **tellorx_reporter_kwargs,
- )
- elif custom_contract_reporter:
+ if custom_contract_reporter:
reporter = CustomXReporter(
custom_contract=custom_contract,
**tellorx_reporter_kwargs,
- ) # type: ignore
+ )
else:
reporter = IntervalReporter(**tellorx_reporter_kwargs) # type: ignore
@@ -493,7 +488,12 @@ async def report(
common_reporter_kwargs["expected_profit"] = expected_profit
# selecting the right reporter will be changed after the switch
if flex_360:
- if rng_auto:
+ if sig_acct_addr != "":
+ reporter = FlashbotsReporter( # type: ignore
+ signature_account=sig_account,
+ **common_reporter_kwargs,
+ )
+ elif rng_auto:
reporter = RNGReporter( # type: ignore
wait_period=120 if wait_period < 120 else wait_period,
**common_reporter_kwargs,
diff --git a/src/telliot_feeds/cli/utils.py b/src/telliot_feeds/cli/utils.py
index 42d99e60..13e90d81 100644
--- a/src/telliot_feeds/cli/utils.py
+++ b/src/telliot_feeds/cli/utils.py
@@ -33,7 +33,7 @@ def reporter_cli_core(ctx: click.Context) -> TelliotCore:
# Ensure chain id compatible with flashbots relay
if ctx.obj["SIGNATURE_ACCOUNT_NAME"] is not None:
# Only supports mainnet
- assert core.config.main.chain_id == 1
+ assert core.config.main.chain_id in (1, 5)
if ctx.obj["TEST_CONFIG"]:
try:
diff --git a/src/telliot_feeds/flashbots/provider.py b/src/telliot_feeds/flashbots/provider.py
index 4a1579db..a9ff4e18 100644
--- a/src/telliot_feeds/flashbots/provider.py
+++ b/src/telliot_feeds/flashbots/provider.py
@@ -21,11 +21,12 @@
from web3.types import RPCResponse
-def get_default_endpoint() -> URI:
- return URI(
- os.environ.get("FLASHBOTS_HTTP_PROVIDER_URI", "https://relay.flashbots.net")
- # os.environ.get("FLASHBOTS_HTTP_PROVIDER_URI", "https://bundle.miningdao.io/")
- )
+def get_default_endpoint(chain_id: int = 1) -> URI:
+ uri = {
+ 1: URI(os.environ.get("FLASHBOTS_HTTP_PROVIDER_URI", "https://relay.flashbots.net")),
+ 5: URI(os.environ.get("FLASHBOTS_HTTP_PROVIDER_URI_GOERLI", "https://relay-goerli.flashbots.net")),
+ }
+ return uri[chain_id]
class FlashbotProvider(HTTPProvider):
diff --git a/src/telliot_feeds/reporters/flashbot.py b/src/telliot_feeds/reporters/flashbot.py
index f1956601..9f14c3c7 100644
--- a/src/telliot_feeds/reporters/flashbot.py
+++ b/src/telliot_feeds/reporters/flashbot.py
@@ -5,74 +5,40 @@
from typing import Any
from typing import Optional
from typing import Tuple
-from typing import Union
from chained_accounts import ChainedAccount
from eth_account.account import Account
from eth_account.signers.local import LocalAccount
from eth_utils import to_checksum_address
-from telliot_core.contract.contract import Contract
-from telliot_core.model.endpoints import RPCEndpoint
from telliot_core.utils.response import error_status
from telliot_core.utils.response import ResponseStatus
from web3 import Web3
from web3.datastructures import AttributeDict
from web3.exceptions import TransactionNotFound
-from telliot_feeds.datafeed import DataFeed
from telliot_feeds.flashbots import flashbot # type: ignore
from telliot_feeds.flashbots.provider import get_default_endpoint # type: ignore
-from telliot_feeds.reporters.interval import IntervalReporter
+from telliot_feeds.reporters.tellor_360 import Tellor360Reporter
from telliot_feeds.utils.log import get_logger
logger = get_logger(__name__)
-class FlashbotsReporter(IntervalReporter):
+class FlashbotsReporter(Tellor360Reporter):
"""Reports values from given datafeeds to a TellorX Oracle
every 10 seconds."""
- def __init__(
- self,
- endpoint: RPCEndpoint,
- account: ChainedAccount,
- signature_account: ChainedAccount,
- chain_id: int,
- master: Contract,
- oracle: Contract,
- datafeed: Optional[DataFeed[Any]] = None,
- expected_profit: Union[str, float] = 100.0,
- transaction_type: int = 0,
- gas_limit: int = 350000,
- max_fee: Optional[int] = None,
- priority_fee: int = 5,
- legacy_gas_price: Optional[int] = None,
- gas_price_speed: str = "fast",
- ) -> None:
-
- self.endpoint = endpoint
- self.account: LocalAccount = Account.from_key(account.key)
+ def __init__(self, signature_account: ChainedAccount, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.account: LocalAccount = Account.from_key(self.account.key)
self.signature_account: LocalAccount = Account.from_key(signature_account.key)
- self.master = master
- self.oracle = oracle
- self.datafeed = datafeed
- self.chain_id = chain_id
- self.acct_addr = to_checksum_address(account.address)
self.sig_acct_addr = to_checksum_address(signature_account.address)
- self.last_submission_timestamp = 0
- self.expected_profit = expected_profit
- self.transaction_type = transaction_type
- self.gas_limit = gas_limit
- self.max_fee = max_fee
- self.priority_fee = priority_fee
- self.legacy_gas_price = legacy_gas_price
- self.gas_price_speed = gas_price_speed
logger.info(f"Reporting with account: {self.acct_addr}")
logger.info(f"Signature address: {self.sig_acct_addr}")
- flashbots_uri = get_default_endpoint()
+ flashbots_uri = get_default_endpoint(self.chain_id)
logger.info(f"Flashbots provider endpoint: {flashbots_uri}")
flashbot(self.endpoint._web3, self.signature_account, flashbots_uri)
@@ -123,9 +89,9 @@ async def report_once(
return None, error_status(msg, e=e, log=logger.error)
# Get nonce
- timestamp_count, read_status = await self.oracle.read(func_name="getTimestampCountById", _queryId=query_id)
+ timestamp_count, read_status = await self.oracle.read(func_name="getNewValueCountbyQueryId", _queryId=query_id)
if not read_status.ok:
- status.error = "Unable to retrieve timestampCount: " + read_status.error # error won't be none # noqa: E501
+ status.error = "Unable to retrieve newValueCount: " + read_status.error # error won't be none # noqa: E501
logger.error(status.error)
status.e = read_status.e
return None, status
diff --git a/src/telliot_feeds/reporters/tellor_360.py b/src/telliot_feeds/reporters/tellor_360.py
index 25d62d4b..1663829a 100644
--- a/src/telliot_feeds/reporters/tellor_360.py
+++ b/src/telliot_feeds/reporters/tellor_360.py
@@ -56,8 +56,6 @@ def __init__(self, stake: float = 0, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.stake: float = stake
- logger.info(f"Reporting with account: {self.acct_addr}")
-
assert self.acct_addr == to_checksum_address(self.account.address)
async def ensure_staked(self) -> Tuple[bool, ResponseStatus]:
| Telliot-feeds not reporting through flashbots network
Report confirmed utilising the following CLI command: telliot-feeds -a XXXX -sa XXXX report --flex-360 -pwd XXXX -spwd XXXX -tx 2 -mf X -pf X -p X -gl XXXX
The Flashbots network is not being used. The LHS is a block explorer showing that builder0x69 won the block, not Flashbots, and the RHS is my latest report transaction.

| @LP-Crypto-Scholar, thank you! This is definitely a bug. This might be the real reason why you might be getting front-run. Will get a fix out ASAP. | 2022-11-07T11:55:06 | 0.0 | [] | [] |
||
laminlabs/bionty-base | laminlabs__bionty-base-256 | 748d6f815c75b7af7fd7addb5efff139ee7bfc80 | diff --git a/bionty/_entity.py b/bionty/_entity.py
index 5192885f..b740e3fe 100644
--- a/bionty/_entity.py
+++ b/bionty/_entity.py
@@ -188,60 +188,6 @@ def _ontology_to_df(self, ontology: Ontology):
columns=["ontology_id", "name"],
).set_index(self._id)
- def _curate(
- self, df: pd.DataFrame, column: str = None, agg_col: str = None
- ) -> pd.DataFrame:
- """Curate index of passed DataFrame to conform with default identifier."""
- df = df.copy()
-
- if agg_col is not None:
- # if provided a column with aggregated values, performs alias mapping
- alias_map = explode_aggregated_column_to_expand(
- self.df.reset_index(), aggregated_col=agg_col, target_col=self._id
- )[self._id]
-
- if column is None:
- # when column is None, use index as the input column
- index_name = df.index.name
- df["__mapped_index"] = (
- df.index if agg_col is None else df.index.map(alias_map)
- )
- df["orig_index"] = df.index
- df.index = df["__mapped_index"].fillna(df["orig_index"])
- del df["__mapped_index"]
- df.index.name = index_name
- matches = check_if_index_compliant(df.index, self.df.index)
- else:
- orig_series = df[column]
- df[column] = df[column] if agg_col is None else df[column].map(alias_map)
- df[column] = df[column].fillna(orig_series)
- new_index, matches = get_compliant_index_from_column(
- df=df,
- ref_df=self.df,
- column=column,
- )
-
- # keep the original index name as column name if exists
- # otherwise name it "orig_index"
- if df.index.name is None:
- df["orig_index"] = df.index
- else:
- df[df.index.name] = df.index
- df.index = new_index
- df.index.name = self._id
- df[column] = orig_series.values # keep the original column untouched
- # annotated what complies with the default ID
- df["__curated__"] = matches
- # some stats for logging
- n_misses = len(matches) - matches.sum()
- frac_misses = round(n_misses / len(matches) * 100, 1)
- n_mapped = matches.sum()
- frac_mapped = 100 - frac_misses
- logger.success(f"{n_mapped} terms ({frac_mapped}%) are mapped.")
- logger.warning(f"{n_misses} terms ({frac_misses}%) are not mapped.")
-
- return df
-
@check_dynamicdir_exists
def _url_download(self, url: str):
"""Download file from url to dynamicdir."""
@@ -305,8 +251,14 @@ def _set_attributes(
available_db_versions = self._load_versions(source="local")
# Use the latest version if version is None.
- self._version = current_version if version is None else str(version)
self._database = current_database if database is None else str(database)
+ # Only the database was passed -> get the latest version from the available db versions # noqa: E501
+ if database and not version:
+ self._version = next(
+ iter(available_db_versions[self._database]["versions"])
+ )
+ else:
+ self._version = current_version if version is None else str(version)
self._url, self._md5 = (
available_db_versions.get(self._database).get("versions").get(self._version) # type: ignore # noqa: E501
)
@@ -364,3 +316,57 @@ def curate(
curated_df[orig_column] = orig_column_values
return curated_df
+
+ def _curate(
+ self, df: pd.DataFrame, column: str = None, agg_col: str = None
+ ) -> pd.DataFrame:
+ """Curate index of passed DataFrame to conform with default identifier."""
+ df = df.copy()
+
+ if agg_col is not None:
+ # if provided a column with aggregated values, performs alias mapping
+ alias_map = explode_aggregated_column_to_expand(
+ self.df.reset_index(), aggregated_col=agg_col, target_col=self._id
+ )[self._id]
+
+ if column is None:
+ # when column is None, use index as the input column
+ index_name = df.index.name
+ df["__mapped_index"] = (
+ df.index if agg_col is None else df.index.map(alias_map)
+ )
+ df["orig_index"] = df.index
+ df.index = df["__mapped_index"].fillna(df["orig_index"])
+ del df["__mapped_index"]
+ df.index.name = index_name
+ matches = check_if_index_compliant(df.index, self.df.index)
+ else:
+ orig_series = df[column]
+ df[column] = df[column] if agg_col is None else df[column].map(alias_map)
+ df[column] = df[column].fillna(orig_series)
+ new_index, matches = get_compliant_index_from_column(
+ df=df,
+ ref_df=self.df,
+ column=column,
+ )
+
+ # keep the original index name as column name if exists
+ # otherwise name it "orig_index"
+ if df.index.name is None:
+ df["orig_index"] = df.index
+ else:
+ df[df.index.name] = df.index
+ df.index = new_index
+ df.index.name = self._id
+ df[column] = orig_series.values # keep the original column untouched
+ # annotated what complies with the default ID
+ df["__curated__"] = matches
+ # some stats for logging
+ n_misses = len(matches) - matches.sum()
+ frac_misses = round(n_misses / len(matches) * 100, 1)
+ n_mapped = matches.sum()
+ frac_mapped = 100 - frac_misses
+ logger.success(f"{n_mapped} terms ({frac_mapped}%) are mapped.")
+ logger.warning(f"{n_misses} terms ({frac_misses}%) are not mapped.")
+
+ return df
| Automatically infer version for passed database
We currently require both database and version to be passed explicitly if the default is not used. The associated version for the passed database is currently looked up in `_current.yaml`.
If only the database is passed, we should attempt to fetch the latest version of this ontology and just use it. A warning wouldn't hurt.
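A minimal sketch of that fallback in Python, mirroring what the patch above does (the helper name and example data are illustrative, and the versions mapping is assumed to be ordered newest-first):

```python
from typing import Optional

def resolve_version(database: str, version: Optional[str], available_db_versions: dict) -> str:
    """Pick the latest locally known version when only the database is given."""
    if database and not version:
        latest = next(iter(available_db_versions[database]["versions"]))
        print(f"No version passed for '{database}'; defaulting to latest: {latest}")  # would be a logger warning
        return latest
    return str(version)

# Example (hypothetical data): the first key is treated as the latest version.
versions = {"cl": {"versions": {"2023-02-15": ("url", "md5"), "2022-08-16": ("url", "md5")}}}
resolve_version("cl", None, versions)  # -> "2023-02-15"
```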
| Was just about to report this!
<img width="1006" alt="Screenshot 2023-03-07 at 12 51 50" src="https://user-images.githubusercontent.com/38218185/223323907-817a8d59-7e87-47c6-aa07-e6854bcaaa9c.png">
| 2023-03-13T17:33:50 | 0.0 | [] | [] |
||
nautobot/nautobot-app-golden-config | nautobot__nautobot-app-golden-config-353 | 31b7d6ddddb8de66f75edcc273934c5de8a60c79 | diff --git a/nautobot_golden_config/views.py b/nautobot_golden_config/views.py
index ecc198f9..e91ed78e 100644
--- a/nautobot_golden_config/views.py
+++ b/nautobot_golden_config/views.py
@@ -264,13 +264,15 @@ def post(self, request, **kwargs):
pk_list = [obj.pk for obj in self.filterset(request.GET, model.objects.only("pk")).qs]
else:
pk_list = model.objects.values_list("pk", flat=True)
+ # When selecting *all* the resulting request args are ConfigCompliance object PKs
+ obj_to_del = [item[0] for item in self.queryset.filter(pk__in=pk_list).values_list("id")]
else:
pk_list = request.POST.getlist("pk")
+ # When selecting individual rows the resulting request args are Device object PKs
+ obj_to_del = [item[0] for item in self.queryset.filter(device__pk__in=pk_list).values_list("id")]
form_cls = self.get_form()
- # The difference between nautobot core is the creation and usage of obj_to_del
- obj_to_del = [item[0] for item in self.queryset.filter(device__pk__in=pk_list).values_list("id")]
if "_confirm" in request.POST:
form = form_cls(request.POST)
if form.is_valid():
| Delete All config compliance objects doesn't work
### Environment
* Python version: 3.8.6
* Nautobot version: 1.3.7
* nautobot-golden-config version: 1.0.3
### Steps to Reproduce
1. Generate 26+ configuration compliance entries in the "Configuration Compliance" page/table
2. Change your table view to be 25 items at a time
3. Hit the check box to select all items on the page
4. In the new selection that appears, select "Select all ### config compliances matching query"
5. Click the "Delete All" button
### Expected Behavior
The confirmation page prompting me to delete all entries should appear. Note: This works when selecting multiple entries on a single page.
### Observed Behavior
The below warning banner appears, and the page refreshes, without prompting me to confirm object deletion:
"No config compliances were selected for deletion."
| It should also be noted that this is limited to the "Delete All" button. Selecting <1000 items, and selecting "Delete" at the bottom of the page, will work as expected as long as all items are on the same visible page.
Is this different than nautobot? If not, likely to fall into âworks as expectedâ
@itdependsnetworks This works correctly on other core Nautobot pages, such as "Devices"
Are you saying this doesn't show up? like this:
<img width="1059" alt="image" src="https://user-images.githubusercontent.com/9260483/185747608-852f953b-f31a-4e1d-84a0-ed69ee96ee4b.png">
Not quite. On the plug-in, the button shows up. But when you click on it, the page refreshes and nothing happens.
On other core Nautobot pages, it loads an "Are you sure?" page.
Are the above replication steps not working correctly?
@jeffkala to take a look at recreating in his env
Can duplicate on demo.nautobot.com: it pops up with "delete all 389 objects", you click the button, and the next page only loads the first object and asks "are you sure", followed by not deleting anything at all.
@matt852 what would you expect for this? To just have it reload back to having no "last" dates? This view is actually populated based on the device list. An actual object isn't even created until a backup/intended job, etc. has been run on that device.
||
ika-rwth-aachen/docker-run | ika-rwth-aachen__docker-run-9 | 97b47ba1f94ee131eb248a8b299b9d05bb2756bb | diff --git a/README.md b/README.md
index 820861d..ed9d5dc 100644
--- a/README.md
+++ b/README.md
@@ -46,10 +46,11 @@ In general, you can pass the same arguments to `docker-run` as you would pass to
docker-run --volume $(pwd):/volume ubuntu ls /volume
```
-In addition to the arguments you are passing, `docker-run` however also enables the following features by default. Each of these default features can be disabled, see [Usage](#usage).
+In addition to the arguments you are passing, `docker-run` however also enables the following features by default. Most of these default features can be disabled, see [Usage](#usage).
- container removal after exit (`--rm`)
- interactive tty (`--interactive --tty`)
- current directory name as container name (`--name`)
+- relative bind mounts (`--volume [./RELATIVE_PATH>]:[TARGET_PATH]`)
- GPU support (`--gpus all` / `--runtime nvidia`)
- X11 GUI forwarding
diff --git a/docker-run-cli/pyproject.toml b/docker-run-cli/pyproject.toml
index d1db6b1..73c79d4 100644
--- a/docker-run-cli/pyproject.toml
+++ b/docker-run-cli/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "docker-run-cli"
-version = "0.9.6"
+version = "0.9.7"
description = "'docker run' and 'docker exec' with useful defaults"
license = {file = "LICENSE"}
readme = "README.md"
@@ -23,14 +23,14 @@ classifiers = [
"Operating System :: POSIX :: Linux",
]
keywords = ["docker", "container"]
-dependencies = []
+dependencies = ["GPUtil~=1.4.0"]
requires-python = ">=3.7"
[project.optional-dependencies]
dev = ["build", "twine"]
-docker-ros = ["docker-run-docker-ros>=1.0.4"]
-plugins = ["docker-run-docker-ros>=1.0.4"]
-all = ["docker-run-docker-ros>=1.0.4", "build", "twine"]
+docker-ros = ["docker-run-docker-ros>=1.0.5"]
+plugins = ["docker-run-docker-ros>=1.0.5"]
+all = ["docker-run-docker-ros>=1.0.5", "build", "twine"]
[project.urls]
"Repository" = "https://github.com/ika-rwth-aachen/docker-run"
diff --git a/docker-run-cli/scripts/docker-run b/docker-run-cli/scripts/docker-run
index ca238de..ccb0b9a 100755
--- a/docker-run-cli/scripts/docker-run
+++ b/docker-run-cli/scripts/docker-run
@@ -26,8 +26,21 @@ python3 -m docker_run "${@}" 2>&1 >$CMD_FILE
CMD=$(cat $CMD_FILE)
rm $CMD_FILE
+# convert command string to array to allow for escaped characters, e.g. "docker run -v /path\ with\ spaces:/path\ with\ spaces ..."
+CMD_ARRAY=()
+ while IFS= read -r -d ' ' part; do
+ while [[ $part == *"\\" ]]; do
+ part+=" "
+ part="${part//\\/}"
+ IFS= read -r -d ' ' next_part
+ part+=$next_part
+ done
+ CMD_ARRAY+=("$part")
+ done <<< "$CMD"
+ CMD_ARRAY+=("${part%$'\n'}")
+
# execute command
if [[ ! -z "$CMD" ]]; then
echo -e "================================================================================\n"
- exec $CMD
+ exec "${CMD_ARRAY[@]}"
fi
diff --git a/docker-run-cli/src/docker_run/__init__.py b/docker-run-cli/src/docker_run/__init__.py
index 8c336c3..ed32f3f 100644
--- a/docker-run-cli/src/docker_run/__init__.py
+++ b/docker-run-cli/src/docker_run/__init__.py
@@ -1,2 +1,2 @@
__name__ = "docker-run"
-__version__ = "0.9.6"
\ No newline at end of file
+__version__ = "0.9.7"
\ No newline at end of file
diff --git a/docker-run-cli/src/docker_run/core.py b/docker-run-cli/src/docker_run/core.py
index 6b51630..ced9acf 100644
--- a/docker-run-cli/src/docker_run/core.py
+++ b/docker-run-cli/src/docker_run/core.py
@@ -146,6 +146,10 @@ def buildDockerCommand(args: Dict[str, Any], unknown_args: List[str] = [], cmd_a
else:
docker_cmd += ["bash"] # default exec command
+ # plugin modifications
+ for plugin in PLUGINS:
+ docker_cmd = plugin.modifyFinalCommand(docker_cmd, args, unknown_args)
+
return " ".join(docker_cmd)
diff --git a/docker-run-cli/src/docker_run/plugins/core.py b/docker-run-cli/src/docker_run/plugins/core.py
index 38957b2..dc8dd99 100644
--- a/docker-run-cli/src/docker_run/plugins/core.py
+++ b/docker-run-cli/src/docker_run/plugins/core.py
@@ -4,6 +4,8 @@
import tempfile
from typing import Any, Dict, List
+import GPUtil
+
from docker_run.utils import log, runCommand
from docker_run.plugins.plugin import Plugin
@@ -55,6 +57,13 @@ def getExecFlags(cls, args: Dict[str, Any], unknown_args: List[str]) -> List[str
flags += cls.interactiveFlags()
return flags
+ @classmethod
+ def modifyFinalCommand(cls, cmd: List[str], args: Dict[str, Any], unknown_args: List[str]) -> List[str]:
+ if "-v" in cmd or "--volume" in cmd:
+ cmd = cls.resolveRelativeVolumeFlags(cmd)
+ cmd = cls.fixSpacesInVolumeFlags(cmd)
+ return cmd
+
@classmethod
def removeFlags(cls) -> List[str]:
return ["--rm"]
@@ -78,12 +87,16 @@ def localeFlags(cls) -> List[str]:
@classmethod
def gpuSupportFlags(cls) -> List[str]:
- if cls.ARCH == "x86_64":
- return ["--gpus all"]
- elif cls.ARCH == "aarch64" and cls.OS == "Linux":
- return ["--runtime nvidia"]
+ if len(GPUtil.getGPUs()) > 0:
+ if cls.ARCH == "x86_64":
+ return ["--gpus all"]
+ elif cls.ARCH == "aarch64" and cls.OS == "Linux":
+ return ["--runtime nvidia"]
+ else:
+ log(f"GPU not supported by `docker-run` on {cls.OS} with {cls.ARCH} architecture")
+ return []
else:
- log(f"GPU not supported by `docker-run` on {cls.OS} with {cls.ARCH} architecture")
+ log(f"No GPU detected")
return []
@classmethod
@@ -92,7 +105,7 @@ def x11GuiForwardingFlags(cls, docker_network: str = "bridge") -> List[str]:
display = os.environ.get("DISPLAY")
if display is None:
return []
-
+
if cls.OS == "Darwin":
runCommand(f"xhost +local:")
@@ -119,4 +132,22 @@ def x11GuiForwardingFlags(cls, docker_network: str = "bridge") -> List[str]:
@classmethod
def currentDirMountFlags(cls) -> List[str]:
- return [f"--volume {os.getcwd()}:{os.getcwd()}", f"--workdir {os.getcwd()}"]
+ cwd = os.getcwd().replace(" ", "\\ ")
+ return [f"--volume {cwd}:{cwd}", f"--workdir {cwd}"]
+
+ @classmethod
+ def resolveRelativeVolumeFlags(cls, cmd: List[str]) -> List[str]:
+ for i, arg in enumerate(cmd):
+ if arg in ["-v", "--volume"]:
+ mount_path = cmd[i + 1].split(":")[0]
+ if mount_path.startswith("."):
+ absolute_mount_path = os.path.abspath(mount_path)
+ cmd[i + 1] = absolute_mount_path + cmd[i + 1][len(mount_path):]
+ return cmd
+
+ @classmethod
+ def fixSpacesInVolumeFlags(cls, cmd: List[str]) -> List[str]:
+ for i, arg in enumerate(cmd):
+ if arg in ["-v", "--volume"]:
+ cmd[i + 1] = cmd[i + 1].replace(" ", "\\ ")
+ return cmd
\ No newline at end of file
diff --git a/docker-run-cli/src/docker_run/plugins/plugin.py b/docker-run-cli/src/docker_run/plugins/plugin.py
index 38c6c37..44430ea 100644
--- a/docker-run-cli/src/docker_run/plugins/plugin.py
+++ b/docker-run-cli/src/docker_run/plugins/plugin.py
@@ -18,3 +18,7 @@ def getRunFlags(cls, args: Dict[str, Any], unknown_args: List[str]) -> List[str]
@abstractmethod
def getExecFlags(cls, args: Dict[str, Any], unknown_args: List[str]) -> List[str]:
raise NotImplementedError()
+
+ @classmethod
+ def modifyFinalCommand(cls, cmd: List[str], args: Dict[str, Any], unknown_args: List[str]) -> List[str]:
+ return cmd
diff --git a/docker-run-docker-ros/pyproject.toml b/docker-run-docker-ros/pyproject.toml
index 48d58a4..a9e8ac4 100644
--- a/docker-run-docker-ros/pyproject.toml
+++ b/docker-run-docker-ros/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "docker-run-docker-ros"
-version = "1.0.4"
+version = "1.0.5"
description = "docker-run plugin for Docker images built by docker-ros"
license = {file = "LICENSE"}
readme = "README.md"
@@ -23,7 +23,7 @@ classifiers = [
"Operating System :: POSIX :: Linux",
]
keywords = ["docker", "container", "ros"]
-dependencies = ["docker-run-cli>=0.9.4"]
+dependencies = ["docker-run-cli>=0.9.7"]
requires-python = ">=3.7"
[project.urls]
diff --git a/docker-run-docker-ros/src/docker_run/plugins/docker_ros.py b/docker-run-docker-ros/src/docker_run/plugins/docker_ros.py
index 0b4e28d..484d2bf 100644
--- a/docker-run-docker-ros/src/docker_run/plugins/docker_ros.py
+++ b/docker-run-docker-ros/src/docker_run/plugins/docker_ros.py
@@ -52,4 +52,5 @@ def userExecFlags(cls, user: str) -> List[str]:
@classmethod
def currentDirMountWorkspaceFlags(cls) -> List[str]:
- return [f"--volume {os.getcwd()}:{cls.TARGET_MOUNT}", f"--workdir {cls.WORKSPACE}"]
+ cwd = os.getcwd().replace(" ", "\\ ")
+ return [f"--volume {cwd}:{cls.TARGET_MOUNT}", f"--workdir {cls.WORKSPACE}"]
| Handling spaces in paths to be mounted
Currently, [`--mws`](https://github.com/ika-rwth-aachen/docker-run/blob/1063da46b29644b26044a1ec2c78d13ddad4458a/docker-run-docker-ros/src/docker_run/plugins/docker_ros.py#L55) and [`--mwd`](https://github.com/ika-rwth-aachen/docker-run/blob/1063da46b29644b26044a1ec2c78d13ddad4458a/docker-run-cli/src/docker_run/plugins/core.py#L122C40-L122C40) do not support mounting folders that contain spaces in their path.
Possible solution: escaping the spaces in the string returned by `os.getcwd()`.
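A minimal sketch of that escaping in Python, mirroring the `currentDirMountFlags` change in the patch above (the standalone function form here is just for illustration):

```python
import os

def current_dir_mount_flags() -> list:
    # Escape spaces so the generated `docker run` command keeps each path as a single token.
    cwd = os.getcwd().replace(" ", "\\ ")
    return [f"--volume {cwd}:{cwd}", f"--workdir {cwd}"]

# e.g. run from "/home/user/my project" this yields:
# ['--volume /home/user/my\\ project:/home/user/my\\ project', '--workdir /home/user/my\\ project']
```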
| 2024-04-04T08:01:52 | 0.0 | [] | [] |