Dataset Viewer

Columns and types:
- repo: string (7 to 60 characters)
- instance_id: string (11 to 64 characters)
- base_commit: string (40 characters)
- patch: string (237 characters to 114k)
- test_patch: string (1 distinct value)
- problem_statement: string (20 characters to 58k)
- hints_text: string (0 to 67.7k characters)
- created_at: timestamp[ns] (2015-08-08 06:08:58 to 2024-12-12 22:07:22)
- environment_setup_commit: string (1 distinct value)
- version: string (1 distinct value)
- FAIL_TO_PASS: sequence (length 0)
- PASS_TO_PASS: sequence (length 0)

repo | instance_id | base_commit | patch | test_patch | problem_statement | hints_text | created_at | environment_setup_commit | version | FAIL_TO_PASS | PASS_TO_PASS
---|---|---|---|---|---|---|---|---|---|---|---|
lukingroup/pylabnet | lukingroup__pylabnet-258 | 687798ef5220437b5c15d5997eff0f8bb07892b0 | diff --git a/pylabnet/scripts/pulsemaster/pulseblock_constructor.py b/pylabnet/scripts/pulsemaster/pulseblock_constructor.py
index 9f657aa4..eab016f0 100644
--- a/pylabnet/scripts/pulsemaster/pulseblock_constructor.py
+++ b/pylabnet/scripts/pulsemaster/pulseblock_constructor.py
@@ -24,6 +24,10 @@ def __init__(self, name, log, var_dict, config=None):
self.pulseblock = None
self.config = config
+ if "iq_cal_path" in self.config:
+ self.iq_calibration = IQ_Calibration(log=log)
+ self.iq_calibration.load_calibration(self.config["iq_cal_path"])
+
def default_placeholder_value(self, placeholder_name):
for key in Placeholder.default_values:
@@ -112,12 +116,9 @@ def compile_pulseblock(self):
# Handle IQ mixing case
if "iq" in arg_dict and arg_dict["iq"]:
- iq_calibration = IQ_Calibration()
- iq_calibration.load_calibration(self.config["iq_cal_path"])
-
(if_freq, lo_freq, phase_opt,
amp_i_opt, amp_q_opt,
- dc_i_opt, dc_q_opt) = iq_calibration.get_optimal_hdawg_and_LO_values(arg_dict["mod_freq"])
+ dc_i_opt, dc_q_opt) = self.iq_calibration.get_optimal_hdawg_and_LO_values(arg_dict["mod_freq"])
self.log.info(f"if={if_freq}, lo={lo_freq}, phase={phase_opt}")
@@ -149,6 +150,7 @@ def compile_pulseblock(self):
# Construct a pulse and add it to the pulseblock
# The iteration over arg_dict takes care of the IQ mixing case
+ # idx = 0 is the I portion, idx = 1 is the Q portion.
for idx, arg_dict in enumerate(arg_dict_list):
# Construct single pulse.
@@ -158,64 +160,65 @@ def compile_pulseblock(self):
pulse = None
self.log.warn(f"Found an unsupported pulse type {pb_spec.pulsetype}")
- # Store the duration of the first pulse (for IQ mixing) as the
- # pb duration is modified for the second pulse.
- if idx == 0:
- first_dur = pulse.dur
pb_dur = pulseblock.dur
+ prev_t0 = pulseblock.latest_t0
+ prev_dur = pulseblock.latest_dur
- # Insert pulse to correct position in pulseblock.
- if pb_spec.tref == "Absolute":
- pulseblock.append_po_as_pb(
- p_obj=pulse,
- offset=offset-pb_dur
- )
- elif pb_spec.tref == "After Last Pulse":
- if idx == 0:
+ # idx = 0 refers to the I pulse (or a normal non-IQ pulse)
+ if idx == 0:
+ # CASE 1
+ if pb_spec.tref == "Absolute":
+ pulseblock.append_po_as_pb(
+ p_obj=pulse,
+ offset=-pb_dur+offset
+ )
+
+ # CASE 2
+ elif pb_spec.tref in ("After Last Pulse", "At End of Sequence"): # For compatbility with previous naming
pulseblock.append_po_as_pb(
p_obj=pulse,
offset=offset
)
- # Force the 2nd pulse to start at same time as the first
- # pulse in an IQ mix pulse.
- else:
+
+ # CASE 3
+ elif pb_spec.tref in ("With Last Pulse", "With Previous Pulse"): # For compatbility with previous naming
+ # Take timing reference based on the last pulse's t0
pulseblock.append_po_as_pb(
p_obj=pulse,
- offset=-first_dur
+ offset=-pb_dur+prev_t0+offset
)
- elif pb_spec.tref == "After Last Pulse On Channel":
- # Get the end time of the last pulse on the ch
- ch = pb.Channel(name=arg_dict["ch"], is_analog=pulse.is_analog)
- if ch in pulseblock.p_dict.keys():
- last_pulse = pulseblock.p_dict[ch][-1]
- last_pulsetime = last_pulse.t0 + last_pulse.dur
- else:
- last_pulsetime = 0
- pulseblock.append_po_as_pb(
- p_obj=pulse,
- offset=last_pulsetime+offset-pb_dur
- )
- elif pb_spec.tref == "With Last Pulse":
- # Retrieve previous pulseblock:
- if i != 0:
- previous_pb_spec = self.pulse_specifiers[i-1]
- else:
- raise ValueError(
- "Cannot chose timing reference 'With Last Pulse' for first pulse in pulse-sequence."
+
+ # CASE 4
+ elif pb_spec.tref == "After Previous Pulse":
+ # Take timing reference based on the last pulse's t0 and duration
+ pulseblock.append_po_as_pb(
+ p_obj=pulse,
+ offset=-pb_dur+prev_t0+prev_dur+offset
)
- # Retrieve duration of previous pulseblock.
- prev_dur = self.resolve_value(previous_pb_spec.dur) * 1e-6
- if idx == 0:
+
+ # CASE 5
+ elif pb_spec.tref == "After Last Pulse On Channel":
+ # Get the end time of the last pulse on the ch
+ ch = pb.Channel(name=arg_dict["ch"], is_analog=pulse.is_analog)
+ if ch in pulseblock.p_dict.keys():
+ last_pulse = pulseblock.p_dict[ch][-1]
+ last_pulsetime = last_pulse.t0 + last_pulse.dur
+ else:
+ last_pulsetime = 0
+
pulseblock.append_po_as_pb(
p_obj=pulse,
- offset=-prev_dur+offset
+ offset=-pb_dur+last_pulsetime+offset
)
+
+ else:
+ # idx = 1 here (Q pulse)
# Force the 2nd pulse to start at same time as the first
- # pulse in an IQ mix pulse.
- else:
- pulseblock.append_po_as_pb(
+ # pulse in an IQ mix pulse. Note that prev_t0 is the t0 of
+ # the I pulse since this is executed right after the I pulse.
+ pulseblock.append_po_as_pb(
p_obj=pulse,
- offset=-first_dur
+ offset=-pb_dur+prev_t0
)
self.pulseblock = pulseblock
diff --git a/pylabnet/scripts/pulsemaster/pulsemaster.py b/pylabnet/scripts/pulsemaster/pulsemaster.py
index 007bd2db..29ba3a56 100644
--- a/pylabnet/scripts/pulsemaster/pulsemaster.py
+++ b/pylabnet/scripts/pulsemaster/pulsemaster.py
@@ -11,8 +11,7 @@
QFormLayout, QComboBox, QWidget, QTableWidgetItem, QVBoxLayout, \
QTableWidgetItem, QCompleter, QLabel, QLineEdit, QCheckBox, QGridLayout
from PyQt5.QtGui import QKeySequence
-from PyQt5.QtCore import QRect, Qt, QAbstractTableModel
-from PyQt5.QtCore import QVariant
+from PyQt5.QtCore import QRect, Qt, QAbstractTableModel, QTimer, QVariant
from simpleeval import simple_eval, NameNotDefined
@@ -145,6 +144,9 @@ def __init__(self, config, ui='pulsemaster', logger_client=None, server_port=Non
self.add_pb_popup = None
+ # Initialize timers for controlling when text boxes get updated
+ self.timers = []
+
# Initialize preserve_bits checkbox state in dictionary
self.update_preserve_bits()
@@ -157,6 +159,12 @@ def __init__(self, config, ui='pulsemaster', logger_client=None, server_port=Non
# Apply all custom styles
self.apply_custom_styles()
+ # Set the number of plotting points for the pulse preview window
+ if "plot_points" in self.config_dict:
+ self.plot_points = self.config_dict["plot_points"]
+ else:
+ self.plot_points = 800 # Default value
+
self.awg_running = False
def apply_custom_styles(self):
@@ -547,7 +555,7 @@ def prep_plotdata(self, pb_obj):
t1, t2 = new_t1, new_t2
# Draw the current pulse at high grid density
- t_ar = np.linspace(t1, t2, 2000)
+ t_ar = np.linspace(t1, t2, self.plot_points)
x_ar.extend(t_ar)
y_ar.extend(p_item.get_value(t_ar))
@@ -866,11 +874,11 @@ def update_pulse_form_field(self, pulse_specifier, pulse_specifier_field, field_
var_parent_field.setEnabled(True)
var_parent_field.setText("")
- # If the t0 term is variable, we must set to "after last pulse",
+ # If the t0 term is variable, we must set to "At End of Sequence",
# otherwise we have no idea when the pulse happens.
if field_var == "offset_var":
tref_field = widgets_dict["tref"]
- tref_field.setCurrentIndex(tref_field.findText("After Last Pulse"))
+ tref_field.setCurrentIndex(tref_field.findText("At End of Sequence"))
self.update_pulse_form_field(pulse_specifier, tref_field, "tref", widgets_dict, pulse_index)
# Store the updated value in parent
@@ -971,7 +979,12 @@ def get_pulse_specifier_form(self, pulse_specifier, pb_constructor, pulse_index)
elif type(field_input) is QLineEdit:
field_input.setText(str(value))
- field_input.textEdited.connect(pulse_mod_function)
+ # Create a timer to prevent the pulse update function from being called immediately
+ self.timers.append(QTimer())
+ self.timers[-1].setSingleShot(True)
+ self.timers[-1].setInterval(300)
+ self.timers[-1].timeout.connect(pulse_mod_function)
+ field_input.textEdited.connect(self.timers[-1].start)
elif type(field_input) is QCheckBox:
field_input.setChecked(bool(value))
@@ -1211,6 +1224,9 @@ def add_pulseblock_constructors_from_popup(self):
# Close popup
self.add_pb_popup.close()
+ # Update the plotting window (clears it)
+ self.plot_current_pulseblock()
+
def gen_pulse_specifier(self, pulsetype_dict, pulse_data_dict):
""" Generates instance of PulseSpecifier which contain full
information of pulse (Pulsetype, channel_number, pulsetype, pulse_parameters,
@@ -1344,7 +1360,7 @@ def clean_and_validate_pulsedict(self, pulsedict):
if key != "tref":
try:
# Try to resolve arithmetic expression containing variables.
- pulsedict[key] = simple_eval(val, names=self.vars)
+ simple_eval(val, names=self.vars)
except NameNotDefined:
typecast_error.append(key)
validated = False
@@ -1393,9 +1409,9 @@ def read_pulse_params_from_form(self):
return False, None
# Check that the specified channels for IQ are not in the same core
- if len(pulse_ch_list) > 1:
- # Subtract 1 to make 0-indexed
- ch_num_list = [(self.ch_assignment_dict[ch][1] - 1) for ch in pulse_ch_list]
+ # if len(pulse_ch_list) > 1:
+ # # Subtract 1 to make 0-indexed
+ # ch_num_list = [(self.ch_assignment_dict[ch][1] - 1) for ch in pulse_ch_list]
# Divide by 2 to see if same core (e.g. channels 0, 1 // 2 = 0)
# ch_num_list = [ch//2 for ch in ch_num_list]
diff --git a/pylabnet/utils/iq_upconversion/iq_calibration.py b/pylabnet/utils/iq_upconversion/iq_calibration.py
index 20f2c75e..96ba5e01 100644
--- a/pylabnet/utils/iq_upconversion/iq_calibration.py
+++ b/pylabnet/utils/iq_upconversion/iq_calibration.py
@@ -27,8 +27,9 @@
class IQ_Calibration():
- def __init__(self):
+ def __init__(self, log=None):
self.initialized = False
+ self.log = log
def load_calibration(self, filename):
self.initialized = True
@@ -283,7 +284,7 @@ def get_optimal_hdawg_values(self, if_freq, lo_freq):
if (not self.initialized):
raise ValueError("No calibration loaded!")
- #Computing the optimal I and Q amplitudes
+ # Computing the optimal I and Q amplitudes
q_opt, phase_opt = self.get_ampl_phase(if_freq, lo_freq)
amp_i_opt = 2 * q_opt / (1 + q_opt) * self.IF_volt
amp_q_opt = 2 * self.IF_volt / (1 + q_opt)
@@ -293,7 +294,7 @@ def get_optimal_hdawg_values(self, if_freq, lo_freq):
return phase_opt, amp_i_opt, amp_q_opt, dc_i_opt, dc_q_opt
def set_optimal_hdawg_and_LO_values(self, hd, mw_source, freq, HDAWG_ports=[3,4], oscillator=2):
- '''Finds optimnal IF and LO frequencies for given output frequency.
+ '''Finds optimal IF and LO frequencies for given output frequency.
Sets the optimal sine output values on the hdawg for the found IF
and LO frequencies. Will also set the HDAWG's sine frequency and LO
frequency to the correct value.'''
@@ -351,7 +352,7 @@ def get_optimal_hdawg_and_LO_values(self, freq):
for iff in if_f:
lof = freq-iff
- if lof > LO[0] and lof < LO[-1]:
+ if LO[0] < lof < LO[-1]:
hm1, h0, h1, h2, h3 = self.get_harmonic_powers(iff, lof)
fidelity.append(self.get_fidelity(hm1, h0, h1, h2, h3, iff))
else:
diff --git a/pylabnet/utils/pulseblock/pulse_block.py b/pylabnet/utils/pulseblock/pulse_block.py
index e7ee5d14..55c80184 100644
--- a/pylabnet/utils/pulseblock/pulse_block.py
+++ b/pylabnet/utils/pulseblock/pulse_block.py
@@ -2,8 +2,8 @@
import copy
class Channel:
- """ Class to represent a signal channel.
- """
+ """ Class to represent a signal channel.
+ """
def __init__(self, name, is_analog):
self.name = name
self.is_analog = is_analog
@@ -102,6 +102,8 @@ def __init__(self, p_obj_list=None, dflt_dict=None, name='', use_auto_dflt=True)
self.p_dict = dict()
self.dflt_dict = dict()
self.use_auto_dflt = use_auto_dflt
+ self.latest_t0 = 0
+ self.latest_dur = 0
if dflt_dict is not None:
self.dflt_dict = copy.deepcopy(dflt_dict)
@@ -207,7 +209,7 @@ def _insert(self, p_obj, cflct_er=True, use_auto_dflt=True):
)
)
-
+
# Check if the channel already exists with the samne name but a
# different type
for key in self.p_dict.keys():
@@ -237,7 +239,9 @@ def _insert(self, p_obj, cflct_er=True, use_auto_dflt=True):
if use_auto_dflt:
self.dflt_dict[ch] = p_obj.auto_default
-
+ # Update the latest values that have been added to the PB
+ self.latest_t0 = p_obj.t0
+ self.latest_dur = p_obj.dur
def insert(self, p_obj, cflct_er=True):
""" Insert a new Pulse object into PulseBlock
@@ -434,6 +438,10 @@ def insert_pb(self, pb_obj, t0=0, cflct_er=True):
pb_obj.dflt_dict[ch]
)
+ # Update the latest values that have been added to the PB
+ self.latest_t0 = t0
+ self.latest_dur = pb_obj.dur
+
def join_pb(self, pb_obj, t0=0, cflct_er=True, name=''):
""" Same as insert_pb(), but instead of modifying self,
a new PulseBlock is created. Self is not altered.
| Clear pulse preview window when new pb is created
| 2021-06-29T00:07:29 | 0.0 | [] | [] |
|||
funkelab/motile_tracker | funkelab__motile_tracker-23 | 65bc35cecc8ee9deac66d0d8b330aafd5ffbd561 | diff --git a/src/motile_plugin/widgets/run_editor.py b/src/motile_plugin/widgets/run_editor.py
index ee78f0a..4567c74 100644
--- a/src/motile_plugin/widgets/run_editor.py
+++ b/src/motile_plugin/widgets/run_editor.py
@@ -4,13 +4,12 @@
from typing import TYPE_CHECKING
from warnings import warn
+import magicgui.widgets
+import napari.layers
import numpy as np
-from fonticon_fa6 import FA6S
from motile_plugin.backend.motile_run import MotileRun
-from napari.layers import Labels
from qtpy.QtCore import Signal
from qtpy.QtWidgets import (
- QComboBox,
QGroupBox,
QHBoxLayout,
QLabel,
@@ -20,13 +19,11 @@
QVBoxLayout,
QWidget,
)
-from superqt.fonticon import icon
from .params_editor import SolverParamsEditor
if TYPE_CHECKING:
import napari
- import napari.layers
logger = logging.getLogger(__name__)
@@ -48,7 +45,7 @@ def __init__(self, viewer: napari.Viewer):
self.solver_params_widget = SolverParamsEditor()
self.run_name: QLineEdit
self.refresh_layer_button: QPushButton
- self.layer_selection_box: QComboBox
+ self.layer_selection_box: magicgui.widgets.Widget
main_layout = QVBoxLayout()
main_layout.addWidget(self._run_widget())
@@ -68,59 +65,41 @@ def _labels_layer_widget(self) -> QWidget:
layer_group = QWidget()
layer_layout = QHBoxLayout()
layer_layout.setContentsMargins(0, 0, 0, 0)
- layer_layout.addWidget(QLabel("Input Layer:"))
-
- # Layer selection combo box
- self.layer_selection_box = QComboBox()
- self.update_labels_layers()
- self.layer_selection_box.setToolTip(
+ label = QLabel("Input Layer:")
+ layer_layout.addWidget(label)
+ label.setToolTip(
"Select the labels layer you want to use for tracking"
)
- size_policy = self.layer_selection_box.sizePolicy()
- size_policy.setHorizontalPolicy(QSizePolicy.MinimumExpanding)
- self.layer_selection_box.setSizePolicy(size_policy)
- layer_layout.addWidget(self.layer_selection_box)
- # Refresh button
- self.refresh_layer_button = QPushButton(
- icon=icon(FA6S.arrows_rotate, color="white")
- )
- self.refresh_layer_button.setToolTip(
- "Refresh this selection box with current napari layers"
+ # # Layer selection combo box
+ self.layer_selection_box = magicgui.widgets.create_widget(
+ annotation=napari.layers.Labels
)
- self.refresh_layer_button.clicked.connect(self.update_labels_layers)
- layer_layout.addWidget(self.refresh_layer_button)
+ layers_events = self.viewer.layers.events
+ layers_events.inserted.connect(self.layer_selection_box.reset_choices)
+ layers_events.removed.connect(self.layer_selection_box.reset_choices)
+ layers_events.reordered.connect(self.layer_selection_box.reset_choices)
+
+ qlayer_select = self.layer_selection_box.native
+
+ size_policy = qlayer_select.sizePolicy()
+ size_policy.setHorizontalPolicy(QSizePolicy.MinimumExpanding)
+ qlayer_select.setSizePolicy(size_policy)
+ layer_layout.addWidget(qlayer_select)
layer_group.setLayout(layer_layout)
return layer_group
- def update_labels_layers(self) -> None:
- """Update the layer selection box with the labels layers in the viewer"""
- self.layer_selection_box.clear()
- for layer in self.viewer.layers:
- if isinstance(layer, Labels):
- self.layer_selection_box.addItem(layer.name)
- if len(self.layer_selection_box) == 0:
- self.layer_selection_box.addItem("None")
-
def get_labels_data(self) -> np.ndarray | None:
"""Get the input segmentation given the current selection in the
layer dropdown.
Returns:
np.ndarray | None: The data of the labels layer with the name
- that is selected, or None if the layer name is not present in
- the viewer or is not a labels layer.
+ that is selected, or None if no layer is selected.
"""
- layer_name = self.layer_selection_box.currentText()
- if layer_name == "None" or layer_name not in self.viewer.layers:
- return None
- layer = self.viewer.layers[layer_name]
- if not isinstance(layer, Labels):
- warn(
- f"Layer {layer_name} is not a Labels layer. List refresh needed",
- stacklevel=2,
- )
+ layer = self.layer_selection_box.value
+ if layer is None:
return None
return layer.data
| Use magicgui to get a labels layer selection that is mostly synced (no refresh button)
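A minimal sketch of the approach taken in the patch above: `magicgui.widgets.create_widget` builds a combo box restricted to `napari.layers.Labels`, and wiring it to the viewer's layer-list events keeps the choices current without a refresh button. The helper name and the standalone function form are illustrative only; in the plugin this wiring lives inside the run editor widget.

```python
# Illustrative outline only; in the plugin this sits inside the run editor
# widget rather than a free function.
import magicgui.widgets
import napari


def make_labels_selector(viewer: "napari.Viewer") -> magicgui.widgets.Widget:
    # Combo box whose choices are the viewer's Labels layers (when run inside napari).
    box = magicgui.widgets.create_widget(annotation=napari.layers.Labels)

    # Refresh the choices whenever the layer list changes, instead of a manual button.
    layer_events = viewer.layers.events
    layer_events.inserted.connect(box.reset_choices)
    layer_events.removed.connect(box.reset_choices)
    layer_events.reordered.connect(box.reset_choices)
    return box
```

`box.value` then returns the selected Labels layer (or `None`), and `box.native` exposes the underlying Qt widget for layout code, matching how the patch uses the selection box.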
| 2024-05-10T11:05:35 | 0.0 | [] | [] |
|||
1QB-Information-Technologies/ccvm | 1QB-Information-Technologies__ccvm-174 | a170953051be11f6c91eb2f58a207c0a0a402b0c | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4393cb2f..838a170e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,12 +15,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Restructured the metadata object to include a `device` field, a
`result_metadata` list, and a `metadata_dict`.
-### Fixed
+### Fixed
- Fixed issue where `s` was being updated incorrectly on each iteration of `DLSolver._solve()`.
+- Fixed the calculation of `solve_time` and `pp_time` for all solvers to reflect the time
+for a single calculation of an instance intead of the time to solve the full batch
### Added
- Added `PumpedLangevinSolver`, which is an extension of `LangevinSolver` to simulate pumped Langevin dynamics with a demo script in the examples directory.
- Implemented a simple gradient descent post-processing step, as described in the paper, similar to Langevin dynamics but without noise; uses the Euler method with box constraint imposition at each iteration.
+- Added a scaling coefficient for the feedback term in `dl-ccvm` as an input to the solver
### Changed
- Streamlined README by relocating and optimizing architecture diagrams.
@@ -29,6 +32,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Consolidated test organization by centralizing all tests under a unified
`tests` folder, with subdirectories for unit, integration, and test data. This
enhances accessibility and clarity in managing test-related resources.
+- Optimized default parameters of `dl-ccvm`, `mf-ccvm`, and `langevin_solver` to reflect the findings of our latest research
+- Changed the default value of `num_iter_main` in the `grad_descent` post-processor to produce
+better results for the default value.
- Updated the `add_metadata` function in the Metadata class to
`add_to_result_metadata` for clarity.
diff --git a/README.md b/README.md
index c134ee96..aa7c9a5e 100644
--- a/README.md
+++ b/README.md
@@ -122,8 +122,8 @@ print(f"The best known solution to this problem is {solution.optimal_value}")
print(f"The best objective value found by the solver was {solution.best_objective_value}")
# The best objective value found by the solver was 798.1630859375
-print(f"The solving process took {solution.solve_time} seconds")
-# The solving process took 8.949262142181396 seconds
+print(f"The solving process took effectively {solution.solve_time} seconds to solve a single instance")
+# The solving process took 0.008949262142181396 seconds
```
## Documentation
@@ -144,7 +144,7 @@ Thank you for considering making a contribution to our project! We appreciate y
## References
-This repository contains architectures and simulators presented in the paper ["Non-convex Quadratic Programming Using Coherent Optical Networks"](https://arxiv.org/abs/2209.04415) by Farhad Khosravi, Ugur Yildiz, Artur Scherer, and Pooya Ronagh.
+This repository contains architectures and simulators presented in the paper ["Non-convex Quadratic Programming Using Coherent Optical Networks"](https://arxiv.org/abs/2209.04415) by Farhad Khosravi, Ugur Yildiz, Martin Perreault, Artur Scherer, and Pooya Ronagh.
## License
diff --git a/ccvm_simulators/post_processor/grad_descent.py b/ccvm_simulators/post_processor/grad_descent.py
index 4d4193a7..0ca38eb4 100644
--- a/ccvm_simulators/post_processor/grad_descent.py
+++ b/ccvm_simulators/post_processor/grad_descent.py
@@ -17,7 +17,7 @@ def postprocess(
v_vector,
lower_clamp=0.0,
upper_clamp=1.0,
- num_iter_main=100,
+ num_iter_main=1000,
num_iter_pp=None,
step_size=0.1,
):
diff --git a/ccvm_simulators/solution.py b/ccvm_simulators/solution.py
index 8b781889..8917d66d 100644
--- a/ccvm_simulators/solution.py
+++ b/ccvm_simulators/solution.py
@@ -15,7 +15,7 @@ class Solution:
objective_values (torch.Tensor): The objective values of the solutions
found by the solver.
iterations (int): The iteration number for this problem size.
- solve_time (float): Time to solve the problem.
+ solve_time (float): The effective time to solve the problem instance only once.
pp_time (float): Time to post-process the problem.
optimal_value (float): The optimal objective value for the given problem instance.
best_value (float): The best objective value for the given problem instance.
diff --git a/ccvm_simulators/solvers/dl_solver.py b/ccvm_simulators/solvers/dl_solver.py
index 9baf1473..95a7c757 100644
--- a/ccvm_simulators/solvers/dl_solver.py
+++ b/ccvm_simulators/solvers/dl_solver.py
@@ -7,7 +7,7 @@
import torch.distributions as tdist
import time
-DL_SCALING_MULTIPLIER = 0.5
+DL_SCALING_MULTIPLIER = 0.2
"""The value used by the DLSolver when calculating a scaling value in
super.get_scaling_factor()"""
@@ -76,7 +76,9 @@ def parameter_key(self):
@parameter_key.setter
def parameter_key(self, parameters):
- expected_dlparameter_key_set = set(["pump", "dt", "iterations", "noise_ratio"])
+ expected_dlparameter_key_set = set(
+ ["pump", "dt", "iterations", "noise_ratio", "feedback_scale"]
+ )
parameter_key_list = parameters.values()
# Iterate over the parameters for each given problem size
for parameter_key in parameter_key_list:
@@ -93,7 +95,7 @@ def parameter_key(self, parameters):
self._parameter_key = parameters
self._is_tuned = False
- def _calculate_drift_boxqp(self, c, s, pump, rate, S=1):
+ def _calculate_drift_boxqp(self, c, s, pump, rate, feedback_scale=100, S=1):
"""We treat the SDE that simulates the CIM of NTT as drift
calculation.
@@ -122,8 +124,9 @@ def _calculate_drift_boxqp(self, c, s, pump, rate, S=1):
s_grad_2 = torch.einsum("cj,cj -> cj", -1 - (pump * rate) - c_pow - s_pow, s)
s_grad_3 = self.v_vector / 2 / S
- c_drift = -c_grad_1 + c_grad_2 - c_grad_3
- s_drift = -s_grad_1 + s_grad_2 - s_grad_3
+ feedback_scale_dynamic = feedback_scale * (0.5 + rate)
+ c_drift = -feedback_scale_dynamic * (c_grad_1 + c_grad_3) + c_grad_2
+ s_drift = -feedback_scale_dynamic * (s_grad_1 + s_grad_3) + s_grad_2
return c_drift, s_drift
def _calculate_grads_boxqp(self, c, s, S=1):
@@ -238,6 +241,7 @@ def _solve(
dt,
iterations,
noise_ratio,
+ feedback_scale,
pump_rate_flag,
g,
evolution_step_size,
@@ -287,7 +291,9 @@ def _solve(
noise_ratio_i = (noise_ratio - 1) * np.exp(-(i + 1) / iterations * 3) + 1
- c_drift, s_drift = self.calculate_drift(c, s, pump, pump_rate)
+ c_drift, s_drift = self.calculate_drift(
+ c, s, pump, pump_rate, feedback_scale
+ )
wiener_increment_c = (
wiener_dist_c.sample((problem_size,)).transpose(0, 1)
* np.sqrt(dt)
@@ -576,6 +582,7 @@ def __call__(
dt = self.parameter_key[problem_size]["dt"]
iterations = self.parameter_key[problem_size]["iterations"]
noise_ratio = self.parameter_key[problem_size]["noise_ratio"]
+ feedback_scale = self.parameter_key[problem_size]["feedback_scale"]
except KeyError as e:
raise KeyError(
f"The parameter '{e.args[0]}' for the given instance size is not defined."
@@ -639,6 +646,7 @@ def __call__(
dt,
iterations,
noise_ratio,
+ feedback_scale,
pump_rate_flag,
g,
evolution_step_size,
@@ -655,6 +663,7 @@ def __call__(
dt,
iterations,
noise_ratio,
+ feedback_scale,
pump_rate_flag,
g,
evolution_step_size,
@@ -666,8 +675,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer for the solve
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Run the post processor on the results, if specified
if post_processor:
@@ -678,7 +690,8 @@ def __call__(
problem_variables = post_processor_object.postprocess(
self.change_variables(c, S), self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = c
pp_time = 0.0
diff --git a/ccvm_simulators/solvers/langevin_solver.py b/ccvm_simulators/solvers/langevin_solver.py
index 8a9e1b28..dd9a1e8b 100644
--- a/ccvm_simulators/solvers/langevin_solver.py
+++ b/ccvm_simulators/solvers/langevin_solver.py
@@ -7,7 +7,7 @@
import torch.distributions as tdist
import time
-LANGEVIN_SCALING_MULTIPLIER = 0.5
+LANGEVIN_SCALING_MULTIPLIER = 0.05
"""The value used by the LangevinSolver when calculating a scaling value in
super.get_scaling_factor()"""
@@ -534,8 +534,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer for the solve
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Run the post processor on the results, if specified
if post_processor:
@@ -546,7 +549,8 @@ def __call__(
problem_variables = post_processor_object.postprocess(
c, self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = c
pp_time = 0.0
diff --git a/ccvm_simulators/solvers/mf_solver.py b/ccvm_simulators/solvers/mf_solver.py
index 321b8695..2aa4e248 100644
--- a/ccvm_simulators/solvers/mf_solver.py
+++ b/ccvm_simulators/solvers/mf_solver.py
@@ -7,7 +7,7 @@
import torch.distributions as tdist
import time
-MF_SCALING_MULTIPLIER = 0.1
+MF_SCALING_MULTIPLIER = 0.05
"""The value used by the MFSolver when calculating a scaling value in
super.get_scaling_factor()"""
@@ -679,8 +679,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Run the post processor on the results, if specified
if post_processor:
@@ -689,12 +692,10 @@ def __call__(
)
problem_variables = post_processor_object.postprocess(
- self.change_variables(mu_tilde, S),
- self.q_matrix,
- self.v_vector,
- device=device,
+ self.change_variables(mu_tilde, S), self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = self.change_variables(mu_tilde, S)
pp_time = 0.0
diff --git a/ccvm_simulators/solvers/pumped_langevin_solver.py b/ccvm_simulators/solvers/pumped_langevin_solver.py
index dcce5a00..78dd22e6 100644
--- a/ccvm_simulators/solvers/pumped_langevin_solver.py
+++ b/ccvm_simulators/solvers/pumped_langevin_solver.py
@@ -288,9 +288,7 @@ def _solve(
):
# Update the record of the sample values with the values found at
# this iteration
- self.c_sample[
- :, :, samples_taken
- ] = c
+ self.c_sample[:, :, samples_taken] = c
samples_taken += 1
return c
@@ -442,8 +440,8 @@ def __call__(
evolution_file=None,
algorithm_parameters=None,
):
- """Solves the box-constrained programming problem using the pumped Langevin solver using
- either Adam algorithm for the calculation of the gradient of the objective function or
+ """Solves the box-constrained programming problem using the pumped Langevin solver using
+ either Adam algorithm for the calculation of the gradient of the objective function or
the simple gradient descent method. This choice can be set in the argument of `algorithm_parameters`.
Args:
@@ -578,8 +576,11 @@ def __call__(
f"Solver option type {type(algorithm_parameters)} is not supported."
)
- # Stop the timer for the solve
- solve_time = time.time() - solve_time_start
+ # Stop the timer for the solve to compute the solution time for solving an instance once
+ # Due to the division by batch_size, the solve_time improves for larger batches
+ # when the solver is run on GPU. This is expected since GPU is hardware specifically
+ # deployed to improve the solution time of solving one single instance by using parallelization
+ solve_time = (time.time() - solve_time_start) / batch_size
# Calibrate the variable
c_prime = (c + S) / (2 * S)
@@ -593,7 +594,8 @@ def __call__(
problem_variables = post_processor_object.postprocess(
c_prime, self.q_matrix, self.v_vector
)
- pp_time = post_processor_object.pp_time
+ # Post-processing time for solving an instance once
+ pp_time = post_processor_object.pp_time / batch_size
else:
problem_variables = c_prime
pp_time = 0.0
diff --git a/examples/ccvm_boxqp_dl.py b/examples/ccvm_boxqp_dl.py
index d582f017..a9387c44 100644
--- a/examples/ccvm_boxqp_dl.py
+++ b/examples/ccvm_boxqp_dl.py
@@ -14,7 +14,13 @@
# Supply solver parameters for different problem sizes
solver.parameter_key = {
- 20: {"pump": 2.0, "dt": 0.005, "iterations": 15000, "noise_ratio": 10},
+ 20: {
+ "pump": 8.0,
+ "feedback_scale": 100,
+ "dt": 0.001,
+ "iterations": 1500,
+ "noise_ratio": 10,
+ },
}
# Load test instances to solve
diff --git a/examples/ccvm_boxqp_mf.py b/examples/ccvm_boxqp_mf.py
index 2c56c699..f8d30ed9 100644
--- a/examples/ccvm_boxqp_mf.py
+++ b/examples/ccvm_boxqp_mf.py
@@ -15,12 +15,12 @@
# Supply solver parameters for different problem sizes
solver.parameter_key = {
20: {
- "pump": 0.5,
- "feedback_scale": 20,
- "j": 20,
- "S": 0.2,
+ "pump": 0.0,
+ "feedback_scale": 4000,
+ "j": 5.0,
+ "S": 20.0,
"dt": 0.0025,
- "iterations": 15000,
+ "iterations": 1500,
}
}
@@ -42,10 +42,10 @@
# (2) algorithm_parameters=AdamParameters(..) for the Adam algorithm
solution = solver(
instance=boxqp_instance,
- post_processor=None,
- algorithm_parameters=AdamParameters(
- alpha=0.001, beta1=0.9, beta2=0.999, add_assign=False
- ),
+ post_processor="grad-descent",
+ # algorithm_parameters=AdamParameters(
+ # alpha=0.001, beta1=0.9, beta2=0.999, add_assign=False
+ # ),
)
print(solution)
diff --git a/examples/langevin_boxqp.py b/examples/langevin_boxqp.py
index a5a7a2eb..d39331cb 100644
--- a/examples/langevin_boxqp.py
+++ b/examples/langevin_boxqp.py
@@ -15,9 +15,9 @@
# Supply solver parameters for different problem sizes
solver.parameter_key = {
20: {
- "dt": 0.005,
- "iterations": 15000,
- "sigma": 0.02,
+ "dt": 0.002,
+ "iterations": 1500,
+ "sigma": 0.5,
"feedback_scale": 1.0,
},
}
@@ -40,7 +40,7 @@
# (2) algorithm_parameters=AdamParameters(..) for the Adam algorithm
solution = solver(
instance=boxqp_instance,
- post_processor=None,
+ post_processor="grad-descent",
# algorithm_parameters=AdamParameters(
# alpha=0.001, beta1=0.9, beta2=0.999, add_assign=False
# ),
diff --git a/examples/pumped_langevin_boxqp.py b/examples/pumped_langevin_boxqp.py
index 87d19b95..9b9cf46c 100644
--- a/examples/pumped_langevin_boxqp.py
+++ b/examples/pumped_langevin_boxqp.py
@@ -19,7 +19,7 @@
20: {
"pump": 2.0, # p0
"dt": 0.002,
- "iterations": 15000,
+ "iterations": 1500,
"sigma": 0.5,
"feedback_scale": 1.0,
},
@@ -43,7 +43,7 @@
# (2) algorithm_parameters=AdamParameters(..) for the Adam algorithm
solution = solver(
instance=boxqp_instance,
- post_processor=None,
+ post_processor="grad-descent",
# algorithm_parameters=AdamParameters(
# alpha=0.001, beta1=0.9, beta2=0.999, add_assign=True
# ),
| Updating the solvers and examples files to reproduce the results of the paper
Currently, the solvers are not producing good results. Some of the parameters of the solvers need to be adjusted to reproduce the results of the paper. This also requires some changes to the code of the solvers such as `langevin_solver`.
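One concrete consequence for users: the DL solver's expected parameter set now includes `feedback_scale` (see `expected_dlparameter_key_set` in the diff), so existing configurations must supply the new key. The updated values from `examples/ccvm_boxqp_dl.py` look like this; constructing the `DLSolver` instance itself is assumed and omitted here.

```python
# Values copied from examples/ccvm_boxqp_dl.py in the patch above; `solver`
# is an already-constructed DLSolver instance (construction omitted).
solver.parameter_key = {
    20: {
        "pump": 8.0,
        "feedback_scale": 100,  # new scaling coefficient for the feedback term
        "dt": 0.001,
        "iterations": 1500,
        "noise_ratio": 10,
    },
}
```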
| 2024-02-29T16:48:39 | 0.0 | [] | [] |
|||
IN-CORE/pyincore | IN-CORE__pyincore-101 | ff26791e2cde1ff75f79b72869183f74dc842ac8 | diff --git a/pyincore/analyses/joplincge/equationlib.py b/pyincore/analyses/joplincge/equationlib.py
index bd90a05cf..3ebce8328 100644
--- a/pyincore/analyses/joplincge/equationlib.py
+++ b/pyincore/analyses/joplincge/equationlib.py
@@ -77,7 +77,7 @@ def set_value(self, name, values, target):
if 'nrows' in info and 'ncols' in info:
values = pd.DataFrame(index=info['rows'], columns=info['cols']).fillna(values)
elif 'nrows' in info and 'ncols' not in info:
- values = pd.Series(index=info['rows']).fillna(values)
+ values = pd.Series(index=info['rows'], dtype='float64').fillna(values)
if type(values) == pd.DataFrame:
rows = values.index.tolist()
@@ -193,7 +193,7 @@ def get(self, name, x=None):
for j in info['cols']:
ret.at[i, j] = x[self.get_index(name, row=i, col=j)]
elif 'nrows' in info and 'ncols' not in info:
- ret = pd.Series(index=info['rows']).fillna(0.0)
+ ret = pd.Series(index=info['rows'], dtype='float64').fillna(0.0)
for i in info['rows']:
ret.at[i] = x[self.get_index(name, row=i)]
elif 'nrows' not in info and 'ncols' not in info:
diff --git a/pyincore/analyses/joplincge/joplincge.py b/pyincore/analyses/joplincge/joplincge.py
index 35a2d6022..ae8b50b9e 100644
--- a/pyincore/analyses/joplincge/joplincge.py
+++ b/pyincore/analyses/joplincge/joplincge.py
@@ -398,16 +398,16 @@ def run(self):
ALPHA = pd.DataFrame(index=F, columns=I).fillna(0.0)
B = pd.DataFrame(index=I, columns=IG).fillna(0.0)
B1 = pd.DataFrame(index=I, columns=I).fillna(0.0)
- CMOWAGE = pd.Series(index=CM).fillna(0.0)
- CMIWAGE = pd.Series(index=L).fillna(0.0)
+ CMOWAGE = pd.Series(index=CM, dtype='float64').fillna(0.0)
+ CMIWAGE = pd.Series(index=L, dtype='float64').fillna(0.0)
FCONST = pd.DataFrame(index=F, columns=I).fillna(0.0)
- GAMMA = pd.Series(index=I).fillna(0.0)
- DELTA = pd.Series(index=I).fillna(0.0)
+ GAMMA = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DELTA = pd.Series(index=I, dtype='float64').fillna(0.0)
PIT = pd.DataFrame(index=G, columns=H).fillna(0.0)
- PRIVRET = pd.Series(index=H).fillna(0.0)
- LFOR = pd.Series(index=LA).fillna(0.0)
- KFOR = pd.Series(index=K).fillna(0.0)
- GFOR = pd.Series(index=G).fillna(0.0)
+ PRIVRET = pd.Series(index=H, dtype='float64').fillna(0.0)
+ LFOR = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ KFOR = pd.Series(index=K, dtype='float64').fillna(0.0)
+ GFOR = pd.Series(index=G, dtype='float64').fillna(0.0)
out = pd.DataFrame(index=G, columns=G).fillna(0.0)
TAUFH = pd.DataFrame(index=G, columns=F).fillna(0.0)
TAUFL = pd.DataFrame(index=G, columns=L).fillna(0.0)
@@ -423,39 +423,39 @@ def run(self):
TAUX = pd.DataFrame(index=G, columns=IG).fillna(0.0)
TAUG = pd.DataFrame(index=G, columns=I).fillna(0.0)
TAXS = pd.DataFrame(index=G, columns=G).fillna(0.0)
- TAXS1 = pd.Series(index=GNL).fillna(0.0)
+ TAXS1 = pd.Series(index=GNL, dtype='float64').fillna(0.0)
# ELASTICITIES AND TAX DATA IMPOSED
BETA = pd.DataFrame(index=I, columns=H).fillna(0.0)
- ETAD = pd.Series(index=I).fillna(0.0)
- ETAE = pd.Series(index=I).fillna(0.0)
- ETAI = pd.Series(index=IG).fillna(0.0)
+ ETAD = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAE = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAI = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETAIX = pd.DataFrame(index=K, columns=IG).fillna(0.0)
ETAL = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- ETAL1 = pd.Series(index=IG).fillna(0.0)
- ETALB1 = pd.Series(index=IG).fillna(0.0)
+ ETAL1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ ETALB1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETALB = pd.DataFrame(index=L, columns=IG).fillna(0.0)
- ETAM = pd.Series(index=I).fillna(0.0)
- ETAYDO = pd.Series(index=H).fillna(0.0)
- ETAYDI = pd.Series(index=H).fillna(0.0)
- ETAUO = pd.Series(index=H).fillna(0.0)
- ETAUI = pd.Series(index=H).fillna(0.0)
- ETARA = pd.Series(index=H).fillna(0.0)
- ETAPT = pd.Series(index=H).fillna(0.0)
- ETAPIT = pd.Series(index=H).fillna(0.0)
-
- EXWGEO = pd.Series(index=CM).fillna(0.0)
- EXWGEI = pd.Series(index=L).fillna(0.0)
- ECOMI = pd.Series(index=L).fillna(0.0)
- ECOMO = pd.Series(index=CM).fillna(0.0)
+ ETAM = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAYDO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAYDI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETARA = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPT = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPIT = pd.Series(index=H, dtype='float64').fillna(0.0)
+
+ EXWGEO = pd.Series(index=CM, dtype='float64').fillna(0.0)
+ EXWGEI = pd.Series(index=L, dtype='float64').fillna(0.0)
+ ECOMI = pd.Series(index=L, dtype='float64').fillna(0.0)
+ ECOMO = pd.Series(index=CM, dtype='float64').fillna(0.0)
JOBCOR = pd.DataFrame(index=H, columns=L).fillna(0.0)
OUTCOR = pd.DataFrame(index=H, columns=CM).fillna(0.0)
LAMBDA = pd.DataFrame(index=I, columns=I).fillna(0.0)
- NRPG = pd.Series(index=H).fillna(0.0)
- depr = pd.Series(index=IG).fillna(0.1)
+ NRPG = pd.Series(index=H, dtype='float64').fillna(0.0)
+ depr = pd.Series(index=IG, dtype='float64').fillna(0.1)
# ARRAYS BUILT TO EXPORT RESULTS TO SEPARATE FILE
@@ -468,60 +468,60 @@ def run(self):
CG0T = pd.DataFrame(index=I, columns=G).fillna(0.0)
CH0 = pd.DataFrame(index=I, columns=H).fillna(0.0)
CH0T = pd.DataFrame(index=I, columns=H).fillna(0.0)
- CMI0 = pd.Series(index=L).fillna(0.0)
- CMO0 = pd.Series(index=CM).fillna(0.0)
- CN0 = pd.Series(index=I).fillna(0.0)
- CN0T = pd.Series(index=I).fillna(0.0)
- CPI0 = pd.Series(index=H).fillna(0.0)
- CPIN0 = pd.Series(index=H).fillna(0.0)
- CPIH0 = pd.Series(index=H).fillna(0.0)
- CX0 = pd.Series(index=I).fillna(0.0)
- D0 = pd.Series(index=I).fillna(0.0)
- DD0 = pd.Series(index=Z).fillna(0.0)
- DS0 = pd.Series(index=Z).fillna(0.0)
- mine = pd.Series(index=Z).fillna(0.0)
- DQ0 = pd.Series(index=Z).fillna(0.0)
+ CMI0 = pd.Series(index=L, dtype='float64').fillna(0.0)
+ CMO0 = pd.Series(index=CM, dtype='float64').fillna(0.0)
+ CN0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CN0T = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CPI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CX0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ D0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DD0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DS0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ mine = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DQ0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
FD0 = pd.DataFrame(index=F, columns=Z).fillna(0.0)
IGT0 = pd.DataFrame(index=G, columns=G).fillna(0.0)
KS0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
LAS0 = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- HH0 = pd.Series(index=H).fillna(0.0)
- HN0 = pd.Series(index=H).fillna(0.0)
- HW0 = pd.Series(index=H).fillna(0.0)
- M0 = pd.Series(index=I).fillna(0.0)
- M01 = pd.Series(index=Z).fillna(0.0)
- MI0 = pd.Series(index=H).fillna(0.0)
- MO0 = pd.Series(index=H).fillna(0.0)
+ HH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HW0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ M0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ M01 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ MI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ MO0 = pd.Series(index=H, dtype='float64').fillna(0.0)
N0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
# NKIO
- KPFOR01 = pd.Series(index=K).fillna(0.0)
- KPFOR0 = pd.Series(index=K).fillna(0.0)
- LNFOR0 = pd.Series(index=LA).fillna(0.0)
- LNFOR01 = pd.Series(index=LA).fillna(0.0)
- GVFOR0 = pd.Series(index=G).fillna(0.0)
- P0 = pd.Series(index=IG).fillna(0.0)
- PD0 = pd.Series(index=I).fillna(0.0)
- PVA0 = pd.Series(index=I).fillna(0.0)
- PWM0 = pd.Series(index=I).fillna(0.0)
- Q0 = pd.Series(index=Z).fillna(0.0)
- Q10 = pd.Series(index=Z).fillna(0.0)
+ KPFOR01 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ KPFOR0 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ LNFOR0 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ LNFOR01 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ GVFOR0 = pd.Series(index=G, dtype='float64').fillna(0.0)
+ P0 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ PD0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PVA0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PWM0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ Q0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ Q10 = pd.Series(index=Z, dtype='float64').fillna(0.0)
R0 = pd.DataFrame(index=F, columns=Z).fillna(1.0)
- RA0 = pd.Series(index=F).fillna(0.0)
- S0 = pd.Series(index=Z).fillna(0.0)
+ RA0 = pd.Series(index=F, dtype='float64').fillna(0.0)
+ S0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
# SPIO
- V0 = pd.Series(index=I).fillna(0.0)
- V0T = pd.Series(index=I).fillna(0.0)
+ V0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ V0T = pd.Series(index=I, dtype='float64').fillna(0.0)
TP = pd.DataFrame(index=H, columns=G).fillna(0.0)
# TAUF0 = Table(G,F,Z)
- YD0 = pd.Series(index=H).fillna(0.0)
- Y0 = pd.Series(index=Z).fillna(0.0)
+ YD0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ Y0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
# -------------------------------------------------------------------------------------------------------------
# CALCULATIONS OF PARAMETERS AND INITIAL VALUES
@@ -540,7 +540,6 @@ def run(self):
BETA = pd.concat([MISC.loc[I, ['ETAY']]] * len(H), axis=1)
BETA.columns = H
- # LAMBDA = pd.concat([MISC.loc[I, ['ETAOP']]]*len(I), axis=1)
LAMBDA.columns = I
for i in I:
LAMBDA.loc[[i], [i]] = MISC.at[i, 'ETAOP']
@@ -785,8 +784,12 @@ def run(self):
# RA0.loc[F] = 1.0
# create data frame for factor taxes by sector
- a = pd.Series(index=I).fillna(0.0)
- a = SAM.loc[USSOCL, I].append(a, ignore_index=True).append(SAM.loc[GL, I]) # labor, land, capital
+ a = SAM.loc[USSOCL, I].reset_index(drop=True)
+ # add a row with zeros
+ a.loc[len(a)] = [0.0] * len(I)
+ # add a row with PTAXJop data
+ a = pd.concat([a, SAM.loc[GL, I]]) # labor, land, capital
+
a.index = F
ALPHA.loc[F, I] = (SAM.loc[F, I] + a.loc[F, I]) / (SAM.loc[F, I].sum(0) + SAM.loc[GF, I].sum(0))
@@ -1819,15 +1822,17 @@ def run_solver(cons_filename, temp_file_name):
households = ["HH1", "HH2", "HH3", "HH4", "HH5"]
labor_groups = ["L1", "L2", "L3", "L4", "L5"]
sectors = ["Goods", "Trades", "Others", "HS1", "HS2", "HS3"]
+ # note TRADE vs TRADES, OTHERS vs OTHER in capitalized sectors
+ sectors_cap = ["GOODS", "TRADE", "OTHER", "HS1", "HS2", "HS3"]
FD0.insert(loc=0, column="Labor Group", value=labor_groups)
FDL.insert(loc=0, column="Labor Group", value=labor_groups)
- gross_income = {"Household Group": households, "Y0": Y0.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index(),
- "YL": YL.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index()}
- hh = {"Household Group": households[:5], "HH0": HH0.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index(),
- "HHL": HHL.loc[{"HH1", "HH2", "HH3", "HH4", "HH5"}].sort_index()}
- ds = {"Sectors": sectors, "DS0": DS0.loc[{"GOODS", "TRADE", "OTHER", "HS1", "HS2", "HS3"}].sort_index(),
- "DSL": vars.get('DS', result[-1]).loc[{"GOODS", "TRADE", "OTHER", "HS1", "HS2", "HS3"}].sort_index()}
+ gross_income = {"Household Group": households, "Y0": Y0.loc[households].sort_index(),
+ "YL": YL.loc[households].sort_index()}
+ hh = {"Household Group": households[:5], "HH0": HH0.loc[households].sort_index(),
+ "HHL": HHL.loc[households].sort_index()}
+ ds = {"Sectors": sectors, "DS0": DS0.loc[sectors_cap].sort_index(),
+ "DSL": vars.get('DS', result[-1]).loc[sectors_cap].sort_index()}
self.set_result_csv_data("domestic-supply", pd.DataFrame(ds), name="domestic-supply",
source="dataframe")
diff --git a/pyincore/analyses/seasidecge/equationlib.py b/pyincore/analyses/seasidecge/equationlib.py
index 8e013f678..bd1ab0775 100644
--- a/pyincore/analyses/seasidecge/equationlib.py
+++ b/pyincore/analyses/seasidecge/equationlib.py
@@ -84,7 +84,7 @@ def set_value(self, name, values, target):
if 'nrows' in info and 'ncols' in info:
values = pd.DataFrame(index=info['rows'], columns=info['cols']).fillna(values)
elif 'nrows' in info and 'ncols' not in info:
- values = pd.Series(index=info['rows']).fillna(values)
+ values = pd.Series(index=info['rows'], dtype='float64').fillna(values)
if type(values) == pd.DataFrame:
rows = values.index.tolist()
@@ -197,7 +197,7 @@ def get(self, name, x=None):
for j in info['cols']:
ret.at[i, j] = x[self.getIndex(name, row=i, col=j)]
elif 'nrows' in info and 'ncols' not in info:
- ret = pd.Series(index=info['rows']).fillna(0.0)
+ ret = pd.Series(index=info['rows'], dtype='float64').fillna(0.0)
for i in info['rows']:
ret.at[i] = x[self.getIndex(name, row=i)]
elif 'nrows' not in info and 'ncols' not in info:
diff --git a/pyincore/analyses/seasidecge/seasidecge.py b/pyincore/analyses/seasidecge/seasidecge.py
index 15829e414..e1aea0cd5 100644
--- a/pyincore/analyses/seasidecge/seasidecge.py
+++ b/pyincore/analyses/seasidecge/seasidecge.py
@@ -392,20 +392,20 @@ def _(x):
AD = pd.DataFrame(index=Z, columns=Z).fillna(0.0)
AG = pd.DataFrame(index=Z, columns=G).fillna(0.0)
AGFS = pd.DataFrame(index=Z, columns=G).fillna(0.0)
- SIGMA = pd.Series(index=I).fillna(0.0)
+ SIGMA = pd.Series(index=I, dtype='float64').fillna(0.0)
ALPHA = pd.DataFrame(index=F, columns=I).fillna(0.0)
ALPHA1 = pd.DataFrame(index=F, columns=I).fillna(0.0)
B = pd.DataFrame(index=I, columns=IG).fillna(0.0)
B1 = pd.DataFrame(index=I, columns=I).fillna(0.0)
- CMIWAGE = pd.Series(index=L).fillna(0.0)
+ CMIWAGE = pd.Series(index=L, dtype='float64').fillna(0.0)
FCONST = pd.DataFrame(index=F, columns=I).fillna(0.0)
- GAMMA = pd.Series(index=I).fillna(0.0)
- DELTA = pd.Series(index=I).fillna(0.0)
+ GAMMA = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DELTA = pd.Series(index=I, dtype='float64').fillna(0.0)
PIT = pd.DataFrame(index=G, columns=H).fillna(0.0)
- PRIVRET = pd.Series(index=H).fillna(0.0)
- LFOR = pd.Series(index=LA).fillna(0.0)
- KFOR = pd.Series(index=K).fillna(0.0)
- GFOR = pd.Series(index=G).fillna(0.0)
+ PRIVRET = pd.Series(index=H, dtype='float64').fillna(0.0)
+ LFOR = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ KFOR = pd.Series(index=K, dtype='float64').fillna(0.0)
+ GFOR = pd.Series(index=G, dtype='float64').fillna(0.0)
out = pd.DataFrame(index=G, columns=G).fillna(0.0)
TAUFH = pd.DataFrame(index=G, columns=F).fillna(0.0)
TAUFL = pd.DataFrame(index=G, columns=L).fillna(0.0)
@@ -421,43 +421,43 @@ def _(x):
TAUX = pd.DataFrame(index=G, columns=IG).fillna(0.0)
TAUG = pd.DataFrame(index=G, columns=I).fillna(0.0)
TAXS = pd.DataFrame(index=G, columns=G).fillna(0.0)
- TAXS1 = pd.Series(index=GNL).fillna(0.0)
+ TAXS1 = pd.Series(index=GNL, dtype='float64').fillna(0.0)
# ELASTICITIES AND TAX DATA IMPOSED
BETA = pd.DataFrame(index=I, columns=H).fillna(0.0)
BETAH = pd.DataFrame(index=HD, columns=H).fillna(0.0)
- ETAD = pd.Series(index=I).fillna(0.0)
- ETAE = pd.Series(index=I).fillna(0.0)
- ETAI = pd.Series(index=IG).fillna(0.0)
+ ETAD = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAE = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAI = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETAIX = pd.DataFrame(index=K, columns=IG).fillna(0.0)
ETAL = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- ETAL1 = pd.Series(index=IG).fillna(0.0)
- ETALB1 = pd.Series(index=IG).fillna(0.0)
+ ETAL1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ ETALB1 = pd.Series(index=IG, dtype='float64').fillna(0.0)
ETALB = pd.DataFrame(index=L, columns=IG).fillna(0.0)
- ETAM = pd.Series(index=I).fillna(0.0)
- ETAYD = pd.Series(index=H).fillna(0.0)
- ETAYDO = pd.Series(index=H).fillna(0.0)
- ETAYDI = pd.Series(index=H).fillna(0.0)
- ETAU = pd.Series(index=H).fillna(0.0)
- ETAUO = pd.Series(index=H).fillna(0.0)
- ETAUI = pd.Series(index=H).fillna(0.0)
- ETARA = pd.Series(index=H).fillna(0.0)
- ETAPT = pd.Series(index=H).fillna(0.0)
- ETAPIT = pd.Series(index=H).fillna(0.0)
-
- EXWGEI = pd.Series(index=L).fillna(0.0)
- ECOMI = pd.Series(index=L).fillna(0.0)
+ ETAM = pd.Series(index=I, dtype='float64').fillna(0.0)
+ ETAYD = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAYDO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAYDI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAU = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUO = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAUI = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETARA = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPT = pd.Series(index=H, dtype='float64').fillna(0.0)
+ ETAPIT = pd.Series(index=H, dtype='float64').fillna(0.0)
+
+ EXWGEI = pd.Series(index=L, dtype='float64').fillna(0.0)
+ ECOMI = pd.Series(index=L, dtype='float64').fillna(0.0)
JOBCOR = pd.DataFrame(index=H, columns=L).fillna(0.0)
LAMBDA = pd.DataFrame(index=I, columns=I).fillna(0.0)
LAMBDAH = pd.DataFrame(index=HD, columns=HD1).fillna(0.0)
- NRPG = pd.Series(index=H).fillna(0.0)
- depr = pd.Series(index=IG).fillna(0.1)
+ NRPG = pd.Series(index=H, dtype='float64').fillna(0.0)
+ depr = pd.Series(index=IG, dtype='float64').fillna(0.1)
- RHO = pd.Series(index=I).fillna(0.0)
+ RHO = pd.Series(index=I, dtype='float64').fillna(0.0)
TT = pd.DataFrame(index=F, columns=IG).fillna(0.0)
# ARRAYS BUILT TO EXPORT RESULTS TO SEPARATE FILE
@@ -471,60 +471,60 @@ def _(x):
CG0T = pd.DataFrame(index=I, columns=G).fillna(0.0)
CH0 = pd.DataFrame(index=I, columns=H).fillna(0.0)
CH0T = pd.DataFrame(index=I, columns=H).fillna(0.0)
- CMI0 = pd.Series(index=L).fillna(0.0)
- CN0 = pd.Series(index=I).fillna(0.0)
- CN0T = pd.Series(index=I).fillna(0.0)
- CPI0 = pd.Series(index=H).fillna(0.0)
- CPIN0 = pd.Series(index=H).fillna(0.0)
- CPIH0 = pd.Series(index=H).fillna(0.0)
- CX0 = pd.Series(index=I).fillna(0.0)
- D0 = pd.Series(index=I).fillna(0.0)
- DD0 = pd.Series(index=Z).fillna(0.0)
- DS0 = pd.Series(index=Z).fillna(0.0)
- DQ0 = pd.Series(index=Z).fillna(0.0)
+ CMI0 = pd.Series(index=L, dtype='float64').fillna(0.0)
+ CN0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CN0T = pd.Series(index=I, dtype='float64').fillna(0.0)
+ CPI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CPIH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ CX0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ D0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ DD0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DS0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ DQ0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
FD0 = pd.DataFrame(index=F, columns=Z).fillna(0.0)
IGT0 = pd.DataFrame(index=G, columns=GX).fillna(0.0)
KS0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW = pd.DataFrame(index=K, columns=IG).fillna(0.0)
KSNEW0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
LAS0 = pd.DataFrame(index=LA, columns=IG).fillna(0.0)
- HH0 = pd.Series(index=H).fillna(0.0)
- HN0 = pd.Series(index=H).fillna(0.0)
- HW0 = pd.Series(index=H).fillna(0.0)
- M0 = pd.Series(index=I).fillna(0.0)
- M01 = pd.Series(index=Z).fillna(0.0)
- MI0 = pd.Series(index=H).fillna(0.0)
- MO0 = pd.Series(index=H).fillna(0.0)
+ HH0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HN0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ HW0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ M0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ M01 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ MI0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ MO0 = pd.Series(index=H, dtype='float64').fillna(0.0)
N0 = pd.DataFrame(index=K, columns=IG).fillna(0.0)
# NKIO
- KPFOR01 = pd.Series(index=K).fillna(0.0)
- KPFOR0 = pd.Series(index=K).fillna(0.0)
- LNFOR0 = pd.Series(index=LA).fillna(0.0)
- LNFOR01 = pd.Series(index=LA).fillna(0.0)
- GVFOR0 = pd.Series(index=G).fillna(0.0)
- P0 = pd.Series(index=IG).fillna(0.0)
- PH0 = pd.Series(index=HD).fillna(0.0)
- PD0 = pd.Series(index=I).fillna(0.0)
- PVA0 = pd.Series(index=I).fillna(0.0)
- PWM0 = pd.Series(index=I).fillna(0.0)
- PW0 = pd.Series(index=I).fillna(0.0)
- Q0 = pd.Series(index=Z).fillna(0.0)
- Q10 = pd.Series(index=Z).fillna(0.0)
+ KPFOR01 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ KPFOR0 = pd.Series(index=K, dtype='float64').fillna(0.0)
+ LNFOR0 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ LNFOR01 = pd.Series(index=LA, dtype='float64').fillna(0.0)
+ GVFOR0 = pd.Series(index=G, dtype='float64').fillna(0.0)
+ P0 = pd.Series(index=IG, dtype='float64').fillna(0.0)
+ PH0 = pd.Series(index=HD, dtype='float64').fillna(0.0)
+ PD0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PVA0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PWM0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ PW0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ Q0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
+ Q10 = pd.Series(index=Z, dtype='float64').fillna(0.0)
R0 = pd.DataFrame(index=F, columns=Z).fillna(1.0)
- RA0 = pd.Series(index=F).fillna(0.0)
- S0 = pd.Series(index=Z).fillna(0.0)
+ RA0 = pd.Series(index=F, dtype='float64').fillna(0.0)
+ S0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
# SPIO
- V0 = pd.Series(index=I).fillna(0.0)
- V0T = pd.Series(index=I).fillna(0.0)
+ V0 = pd.Series(index=I, dtype='float64').fillna(0.0)
+ V0T = pd.Series(index=I, dtype='float64').fillna(0.0)
TP = pd.DataFrame(index=H, columns=G).fillna(0.0)
# TAUF0 = Table(G,F,Z)
- YD0 = pd.Series(index=H).fillna(0.0)
- Y0 = pd.Series(index=Z).fillna(0.0)
+ YD0 = pd.Series(index=H, dtype='float64').fillna(0.0)
+ Y0 = pd.Series(index=Z, dtype='float64').fillna(0.0)
for label in G1:
out.loc[label, label] = 0
@@ -870,13 +870,15 @@ def _(x):
PVA0.loc[I] = PD0.loc[I] - (
AD.loc[I, I].mul(P0.loc[I], axis='index').mul(1.0 + TAUQ.loc[GS, I].sum(0).T, axis='index').sum(0).T)
- RHO.loc[I] = (1 - SIGMA.loc[I]) / SIGMA.loc[I];
+ RHO.loc[I] = (1 - SIGMA.loc[I]) / SIGMA.loc[I]
# RA0.loc[F] = 1.0
# create data frame for factor taxes by sector
-
- a = pd.Series(index=I).fillna(0.0)
- a = SAM.loc[USSOCL, I].append(a, ignore_index=True).append(SAM.loc[GL, I]) # labor, land, capital
+ a = SAM.loc[USSOCL, I].reset_index(drop=True)
+ # add a row with zeros
+ a.loc[len(a)] = [0.0] * len(I)
+ # add a row with PROPTX data
+ a = pd.concat([a, SAM.loc[GL, I]]) # labor, land, capital
a.index = F
ALPHA.loc[F, I] = (SAM.loc[F, I] + a.loc[F, I]) / (SAM.loc[F, I].sum(0) + SAM.loc[GF, I].sum(0))
| Pandas warning - default dtype for empty series will be 'object' instead of 'float64' in a future version
joplin_cge.ipynb and seaside_cge.ipynb under incore-docs/notebooks produce a warning from pandas:
> FutureWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.
The full dump of the warning messages is attached:
[pandas-warning.log](https://github.com/IN-CORE/pyincore/files/8044002/pandas-warning.log)
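The fix applied across both CGE analyses is to give every empty Series an explicit dtype. A minimal before/after sketch (the index labels here are arbitrary, not taken from the notebooks):

```python
import pandas as pd

H = ["HH1", "HH2", "HH3"]  # arbitrary index labels for illustration

# Old pattern: empty Series without a dtype emits the FutureWarning above (pandas 1.x)
cpi0 = pd.Series(index=H).fillna(0.0)

# Patched pattern: explicit dtype keeps float64 behaviour and silences the warning
cpi0 = pd.Series(index=H, dtype='float64').fillna(0.0)
```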
| 2022-02-16T17:16:43 | 0.0 | [] | [] |
|||
redhat-cip/hardware | redhat-cip__hardware-196 | d9f97ce64a9df1be207613b311e4d1dff98120ac | diff --git a/hardware/areca.py b/hardware/areca.py
index f61654c1..55b74d94 100644
--- a/hardware/areca.py
+++ b/hardware/areca.py
@@ -19,6 +19,9 @@
from subprocess import Popen
import sys
+from hardware import detect_utils
+
+
SEP_REGEXP = re.compile(r"\s*:\s*")
@@ -147,6 +150,13 @@ def _disable_password():
def detect():
"""Detect Areca controller configuration."""
+ if not detect_utils.which('cli64'):
+ sys.stderr.write('Cannot find cli64 binary\n')
+ return []
+ return detect_areca()
+
+
+def detect_areca():
hwlist = []
device = _sys_info()
if not device:
@@ -193,6 +203,4 @@ def detect():
if len(hwlist):
return hwlist
- # If we dont't detect any areca controller, return None
- # This avoid having empty lists
- return None
+ return []
| Improving the output...
Meanwhile, I can run `apt install python3-hardware` and then `hardware-detect --human` but the output
comes with a few error messages that could easily be caught:
```
/bin/sh: 1: cli64: not found
Info: No Areca controller found
Cannot find megacli on the system
read_smart: Reading S.M.A.R.T information on /dev/sdb
read_smart: Reading S.M.A.R.T information on /dev/sdb with -d ata
read_smart: no device /dev/sdb
read_smart: Reading S.M.A.R.T information on /dev/sda
read_smart: Reading S.M.A.R.T information on /dev/sda with -d ata
read_smart: no device /dev/sda
modprobe: FATAL: Module ipmi_smb not found in directory /lib/modules/6.1.0-5-amd64
Info: Probing ipmi_si failed
Info: Probing ipmi_devintf failed
IANA PEN registry open failed: No such file or directory
Info: No Infiniband device found
IANA PEN registry open failed: No such file or directory
/bin/sh: 1: Syntax error: end of file unexpected
Unable to run hp-conrep:
[('hpa', 'slots', 'count', '2'),
```
The ones to catch are the cli64: not found message, the Syntax error(?), and the Unable to run hp-conrep message.
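Below is a minimal sketch of the guard added in the patch above, using the standard-library `shutil.which` in place of the project's `detect_utils.which`, so a missing `cli64` binary is reported cleanly instead of leaking a shell error:

```python
import shutil
import sys

def detect_areca():
    # stands in for the full detection logic factored out in the patch
    return []

def detect():
    """Detect Areca controller configuration."""
    if not shutil.which('cli64'):
        sys.stderr.write('Cannot find cli64 binary\n')
        return []  # empty list instead of None, so callers can iterate safely
    return detect_areca()
```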
| 2023-04-03T15:39:37 | 0.0 | [] | [] |
|||
PaddlePaddle/PARL | PaddlePaddle__PARL-1076 | 2d48f6ced3ded581732bbe39152bf3934eac782f | diff --git a/parl/utils/utils.py b/parl/utils/utils.py
index 3d169d1ef..991b7ad5f 100644
--- a/parl/utils/utils.py
+++ b/parl/utils/utils.py
@@ -64,6 +64,32 @@ def get_fluid_version():
_HAS_PADDLE = False
_HAS_TORCH = False
+def check_installed_framework_in_windows():
+ global _HAS_FLUID, _HAS_PADDLE, _HAS_TORCH
+ # paddle & fluid
+ try:
+ _HAS_FLUID = False
+ _HAS_PADDLE = False
+ import paddle
+ from paddle import fluid
+
+ paddle_version = get_fluid_version()
+ logger.info("paddlepaddle version: {}.".format(paddle.__version__))
+ if paddle_version < 200 and paddle_version != 0:
+ assert paddle_version >= 185, "PARL requires paddle >= 1.8.5 and paddle < 2.0.0"
+ _HAS_FLUID = True
+ else:
+ _HAS_PADDLE = True
+ except ImportError as e:
+ _HAS_FLUID = False
+ _HAS_PADDLE = False
+ # torch
+ try:
+ import torch
+ _HAS_TORCH = True
+ except ImportError:
+ _HAS_TORCH = False
+
def check_installed_framework():
def check(installed_framework):
try:
@@ -101,11 +127,15 @@ def check(installed_framework):
_HAS_TORCH = installed_framework['_HAS_TORCH']
del manager, installed_framework
-check_installed_framework()
_IS_WINDOWS = (sys.platform == 'win32')
_IS_MAC = (sys.platform == 'darwin')
+if _IS_WINDOWS:
+ check_installed_framework_in_windows()
+else:
+ check_installed_framework()
+
def kill_process(regex_pattern):
"""kill process whose execution commnad is matched by regex pattern
| pickle error: unable to import parl
When importing parl, an AttributeError is raised: Can't pickle local object 'check_installed_framework.<locals>.check', in the file parl\utils\utils.py in <module>.
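Below is a minimal standalone sketch (not taken from parl) of the underlying failure mode on Windows: with the `spawn` start method, `multiprocessing` has to pickle the process target, and a function defined inside another function cannot be pickled:

```python
import multiprocessing as mp

def check_installed_framework():
    def check(d):          # local (nested) function
        d["torch"] = True  # illustrative value only

    with mp.Manager() as manager:
        d = manager.dict()
        p = mp.Process(target=check, args=(d,))
        p.start()          # on Windows (spawn) this raises: AttributeError:
        p.join()           # Can't pickle local object
                           # 'check_installed_framework.<locals>.check'

if __name__ == "__main__":
    check_installed_framework()
```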
| Thanks for the feedback! Please share your Python version, operating system, and parl version.
I'm using Python 3.9.13, the OS is Windows 11, and the parl version is 2.2.
OK, we will confirm and fix the problem today; in the meantime you can use parl 2.1.
OK, thanks.
OK. May I ask how to download parl 2.1?
pip install parl==2.1
or
python -m pip install parl==2.1
I have installed it. Now, when using 2.1, I get 'Agent' object has no attribute 'fluid_executor'. Has fluid_executor been deprecated? What should I use instead?
Yes, fluid_executor has been deprecated; it is an interface from the static-graph era. Starting with 2.0, parl moved entirely to dynamic graphs. If you want to keep using fluid_executor, you can install version 1.4 or earlier.
I cannot install version 1.4 or earlier; running pip install parl==1.4 directly fails. Is there anything I can do?
That is because Python 3.9 did not exist yet when parl 1.4 was released, so 1.4 cannot be found in a py39 environment. You would have to downgrade Python; we recommend using Anaconda to set up environments with different Python versions.
Is there any up-to-date official documentation or video tutorial for learning parl? I could adapt my code myself; otherwise, which Python version does 1.4 require?
Teacher Ke's tutorial is from three years ago. For up-to-date material we currently recommend reading the docs plus the quick_start examples to get familiar with dynamic-graph parl.
Chinese docs: https://github.com/PaddlePaddle/PARL/blob/develop/docs/zh_CN/Overview.md
English docs: https://parl.readthedocs.io/en/latest/index.html
Also, parl 1.4 requires Python 3.7.
Got it, thanks. | 2023-03-13T03:25:24 | 0.0 | [] | []
||
MannLabs/alphadia | MannLabs__alphadia-382 | bd9d1857781e9342acd0b1ba9b5a037ee5a53cc4 | diff --git a/alphadia/outputaccumulator.py b/alphadia/outputaccumulator.py
index 16b8b808..9f1e8547 100644
--- a/alphadia/outputaccumulator.py
+++ b/alphadia/outputaccumulator.py
@@ -78,7 +78,8 @@ def _calculate_fragment_position(self):
def parse_output_folder(
self,
folder: str,
- selected_precursor_columns: list[str] | None = None,
+ mandatory_precursor_columns: list[str] | None = None,
+ optional_precursor_columns: list[str] | None = None,
) -> tuple[pd.DataFrame, pd.DataFrame]:
"""
Parse the output folder to get a precursor and fragment dataframe in the flat format.
@@ -87,7 +88,7 @@ def parse_output_folder(
----------
folder : str
The output folder to be parsed.
- selected_precursor_columns : list, optional
+ mandatory_precursor_columns : list, optional
The columns to be selected from the precursor dataframe, by default ['precursor_idx', 'sequence', 'flat_frag_start_idx', 'flat_frag_stop_idx', 'charge', 'rt_library', 'mobility_library', 'mz_library', 'proteins', 'genes', 'mods', 'mod_sites', 'proba']
Returns
@@ -99,8 +100,8 @@ def parse_output_folder(
"""
- if selected_precursor_columns is None:
- selected_precursor_columns = [
+ if mandatory_precursor_columns is None:
+ mandatory_precursor_columns = [
"precursor_idx",
"sequence",
"flat_frag_start_idx",
@@ -108,12 +109,10 @@ def parse_output_folder(
"charge",
"rt_library",
"rt_observed",
- "rt_calibrated",
"mobility_library",
"mobility_observed",
"mz_library",
"mz_observed",
- "mz_calibrated",
"proteins",
"genes",
"mods",
@@ -121,16 +120,28 @@ def parse_output_folder(
"proba",
"decoy",
]
+
+ if optional_precursor_columns is None:
+ optional_precursor_columns = [
+ "rt_calibrated",
+ "mz_calibrated",
+ ]
+
psm_df = pd.read_parquet(os.path.join(folder, "psm.parquet"))
frag_df = pd.read_parquet(os.path.join(folder, "frag.parquet"))
- assert set(
- selected_precursor_columns
- ).issubset(
- psm_df.columns
- ), f"selected_precursor_columns must be a subset of psm_df.columns didnt find {set(selected_precursor_columns) - set(psm_df.columns)}"
- psm_df = psm_df[selected_precursor_columns]
- # validate.precursors_flat_from_output(psm_df)
+ if not set(mandatory_precursor_columns).issubset(psm_df.columns):
+ raise ValueError(
+ f"mandatory_precursor_columns must be a subset of psm_df.columns didnt find {set(mandatory_precursor_columns) - set(psm_df.columns)}"
+ )
+
+ available_columns = sorted(
+ list(
+ set(mandatory_precursor_columns)
+ | (set(optional_precursor_columns) & set(psm_df.columns))
+ )
+ )
+ psm_df = psm_df[available_columns]
# get foldername of the output folder
foldername = os.path.basename(folder)
@@ -260,9 +271,6 @@ def __init__(self, folders: list, number_of_processes: int):
self._lock = threading.Lock() # Lock to prevent two processes trying to update the same subscriber at the same time
def subscribe(self, subscriber: BaseAccumulator):
- assert isinstance(
- subscriber, BaseAccumulator
- ), f"subscriber must be an instance of BaseAccumulator, got {type(subscriber)}"
self._subscribers.append(subscriber)
def _update_subscriber(
@@ -420,14 +428,21 @@ def post_process(self):
Post process the consensus_speclibase by normalizing retention times.
"""
+ norm_delta_max = self._norm_delta_max
+ if "rt_calibrated" not in self.consensus_speclibase.precursor_df.columns:
+ logger.warning(
+ "rt_calibrated not found in the precursor_df, delta-max normalization will not be performed"
+ )
+ norm_delta_max = False
+
+ logger.info("Performing quality control for transfer learning.")
+ logger.info(f"Normalize by delta: {norm_delta_max}")
logger.info(
- "Performing quality control for transfer learning."
- + f"Normalize by delta: {self._norm_delta_max}"
- + f"Precursor correlation cutoff: {self._precursor_correlation_cutoff}"
- + f"Fragment correlation cutoff: {self._fragment_correlation_ratio}"
+ f"Precursor correlation cutoff: {self._precursor_correlation_cutoff}"
)
+ logger.info(f"Fragment correlation cutoff: {self._fragment_correlation_ratio}")
- if self._norm_delta_max:
+ if norm_delta_max:
self.consensus_speclibase = normalize_rt_delta_max(
self.consensus_speclibase
)
@@ -563,13 +578,20 @@ def ms2_quality_control(
# calculate the median correlation for the precursor
intensity_mask = flat_intensity > 0.0
- median_correlation = np.median(flat_correlation[intensity_mask])
+ median_correlation = (
+ np.median(flat_correlation[intensity_mask]) if intensity_mask.any() else 0.0
+ )
# use the precursor for MS2 learning if the median correlation is above the cutoff
use_for_ms2[i] = median_correlation > precursor_correlation_cutoff
- fragment_intensity_view[:] = fragment_intensity_view * (
- fragment_correlation_view > median_correlation * fragment_correlation_ratio
+ # Fix: Use loc to modify the original DataFrame instead of the view
+ spec_lib_base.fragment_intensity_df.loc[start_idx:stop_idx] = (
+ fragment_intensity_view.values
+ * (
+ fragment_correlation_view
+ > median_correlation * fragment_correlation_ratio
+ )
)
spec_lib_base.precursor_df["use_for_ms2"] = use_for_ms2
diff --git a/alphadia/workflow/peptidecentric.py b/alphadia/workflow/peptidecentric.py
index 54d0e8f9..b03c05c0 100644
--- a/alphadia/workflow/peptidecentric.py
+++ b/alphadia/workflow/peptidecentric.py
@@ -260,6 +260,36 @@ def norm_to_rt(
else:
raise ValueError(f"Unknown norm_rt_mode {mode}")
+ def get_precursor_mz_column(self):
+ """Get the precursor m/z column name.
+ This function will return `mz_calibrated` if precursor calibration has happened, otherwise it will return `mz_library`.
+ If no MS1 data is present, it will always return `mz_library`.
+
+ Returns
+ -------
+ str
+ Name of the precursor m/z column
+
+ """
+ return (
+ f"mz_{self.optimization_manager.column_type}"
+ if self.dia_data.has_ms1
+ else "mz_library"
+ )
+
+ def get_fragment_mz_column(self):
+ return f"mz_{self.optimization_manager.column_type}"
+
+ def get_rt_column(self):
+ return f"rt_{self.optimization_manager.column_type}"
+
+ def get_mobility_column(self):
+ return (
+ f"mobility_{self.optimization_manager.column_type}"
+ if self.dia_data.has_mobility
+ else "mobility_library"
+ )
+
def get_ordered_optimizers(self):
"""Select appropriate optimizers. Targeted optimization is used if a valid target value (i.e. a number greater than 0) is specified in the config;
if a value less than or equal to 0 is supplied, automatic optimization is used.
@@ -480,6 +510,7 @@ def search_parameter_optimization(self):
log_string(
"==============================================", verbosity="progress"
)
+
if insufficient_precursors_to_optimize:
precursor_df_filtered, fragments_df_filtered = self.filter_dfs(
precursor_df, self.optlock.fragments_df
@@ -759,14 +790,10 @@ def extract_batch(
batch_precursor_df,
batch_fragment_df,
config.jitclass(),
- rt_column=f"rt_{self.optimization_manager.column_type}",
- mobility_column=f"mobility_{self.optimization_manager.column_type}"
- if self.dia_data.has_mobility
- else "mobility_library",
- precursor_mz_column=f"mz_{self.optimization_manager.column_type}"
- if self.dia_data.has_ms1
- else "mz_library",
- fragment_mz_column=f"mz_{self.optimization_manager.column_type}",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
fwhm_rt=self.optimization_manager.fwhm_rt,
fwhm_mobility=self.optimization_manager.fwhm_mobility,
)
@@ -806,14 +833,10 @@ def extract_batch(
batch_precursor_df,
batch_fragment_df,
config=config,
- rt_column=f"rt_{self.optimization_manager.column_type}",
- mobility_column=f"mobility_{self.optimization_manager.column_type}"
- if self.dia_data.has_mobility
- else "mobility_library",
- precursor_mz_column=f"mz_{self.optimization_manager.column_type}"
- if self.dia_data.has_ms1
- else "mz_library",
- fragment_mz_column=f"mz_{self.optimization_manager.column_type}",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
)
features_df, fragments_df = candidate_scoring(
@@ -1051,12 +1074,10 @@ def requantify(self, psm_df):
self.spectral_library.precursor_df_unfiltered,
self.spectral_library.fragment_df,
config=config,
- precursor_mz_column="mz_calibrated",
- fragment_mz_column="mz_calibrated",
- rt_column="rt_calibrated",
- mobility_column="mobility_calibrated"
- if self.dia_data.has_mobility
- else "mobility_library",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
)
multiplexed_candidates["rank"] = 0
@@ -1144,8 +1165,10 @@ def requantify_fragments(
candidate_speclib_flat.precursor_df,
candidate_speclib_flat.fragment_df,
config=config,
- precursor_mz_column="mz_calibrated",
- fragment_mz_column="mz_calibrated",
+ rt_column=self.get_rt_column(),
+ mobility_column=self.get_mobility_column(),
+ precursor_mz_column=self.get_precursor_mz_column(),
+ fragment_mz_column=self.get_fragment_mz_column(),
)
# we disregard the precursors, as we want to keep the original scoring from the top12 search
| Transfer Learning extraction fails after calibration failure of file 3/8
**Describe the bug**
Transfer Learning extraction fails after calibration failure of file 3/8.
**To Reproduce**
Linux Ubuntu, AlphaDIA 1.8.1
[log.txt](https://github.com/user-attachments/files/17814527/log.txt)
| 2024-11-22T12:59:01 | 0.0 | [] | [] |
|||
jupyterlab/retrolab | jupyterlab__retrolab-138 | fd73915a598199c0fba4492c9e5f16a2895a8475 | diff --git a/README.md b/README.md
index 887ae428..c1741a4c 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,20 @@ jupyter labextension list
Should also be available when starting `retrolab`.
+### Launching
+
+From an open notebook:
+
+1. Click the RetroLab button in the toolbar; or
+2. View > Open in RetroLab from the menu
+
+To access the main RetroLab tree (file browser):
+
+1. Help > Launch RetroLab File Browser from the menu; or
+2. Go to the /retro URL path of your Jupyter site
+
+## Tour
+
### Files 🗂 and Running Sessions 🏃‍♀️

diff --git a/packages/lab-extension/src/index.ts b/packages/lab-extension/src/index.ts
index 1fffea28..5d5cf53f 100644
--- a/packages/lab-extension/src/index.ts
+++ b/packages/lab-extension/src/index.ts
@@ -15,6 +15,8 @@ import { DocumentRegistry } from '@jupyterlab/docregistry';
import { IMainMenu } from '@jupyterlab/mainmenu';
+import { ITranslator } from '@jupyterlab/translation';
+
import {
INotebookModel,
INotebookTracker,
@@ -35,6 +37,7 @@ namespace CommandIDs {
* Toggle Top Bar visibility
*/
export const openRetro = 'retrolab:open';
+ export const launchRetroTree = 'retrolab:launchtree';
}
/**
@@ -124,9 +127,45 @@ const openRetro: JupyterFrontEndPlugin<void> = {
}
};
+/**
+ * A plugin to add a command to open the RetroLab Tree.
+ */
+const launchRetroTree: JupyterFrontEndPlugin<void> = {
+ id: '@retrolab/lab-extension:launch-retrotree',
+ autoStart: true,
+ requires: [ITranslator],
+ optional: [IMainMenu, ICommandPalette],
+ activate: (
+ app: JupyterFrontEnd,
+ translator: ITranslator,
+ menu: IMainMenu | null,
+ palette: ICommandPalette | null
+ ): void => {
+ const { commands } = app;
+ const trans = translator.load('jupyterlab');
+ const category = trans.__('Help');
+
+ commands.addCommand(CommandIDs.launchRetroTree, {
+ label: trans.__('Launch RetroLab File Browser'),
+ execute: () => {
+ window.open(PageConfig.getBaseUrl() + 'retro/tree');
+ }
+ });
+
+ if (menu) {
+ const helpMenu = menu.helpMenu;
+ helpMenu.addGroup([{ command: CommandIDs.launchRetroTree }], 1);
+ }
+
+ if (palette) {
+ palette.addItem({ command: CommandIDs.launchRetroTree, category });
+ }
+ }
+};
+
/**
* Export the plugins as default.
*/
-const plugins: JupyterFrontEndPlugin<any>[] = [openRetro];
+const plugins: JupyterFrontEndPlugin<any>[] = [launchRetroTree, openRetro];
export default plugins;
diff --git a/retrolab/app.py b/retrolab/app.py
index f17601bc..8b7e2fba 100644
--- a/retrolab/app.py
+++ b/retrolab/app.py
@@ -77,6 +77,12 @@ def get_page_config(self):
return page_config
+class RetroRedirectHandler(RetroHandler):
+ @web.authenticated
+ def get(self):
+ return self.redirect(self.base_url+'retro/tree')
+
+
class RetroTreeHandler(RetroHandler):
@web.authenticated
def get(self, path=None):
@@ -152,6 +158,7 @@ def initialize_handlers(self):
{"url": "/retro/edit/{0}"},
)
)
+ self.handlers.append(("/retro/?", RetroRedirectHandler))
self.handlers.append(("/retro/tree(.*)", RetroTreeHandler))
self.handlers.append(("/retro/notebooks(.*)", RetroNotebookHandler))
self.handlers.append(("/retro/edit(.*)", RetroFileHandler))
| Make it easier to find the Retro tree
Thank you very much for this repo!
Actually, I am looking into using Retro because I want to write automated Javascript tests for our QHub JupyterHub environment, and it is difficult to locate specific UI elements in a predictable manner in regular JupyterLab. Maybe Retro will make that easier...
Anyway, when I first installed it I struggled to actually understand how to access it from JupyterLab. I don't think there is documentation that says exactly what you need to do.
I'd seen your screenshots that were at the /retro/... path so I went to /retro and got 404... It was only by trying /retro/tree exactly that I realised things were working after all, I just needed to be specific on the URL.
So it would be great if at least /retro and /retro/ can redirect to /retro/tree, assuming URLs are the recommended route to access Retro. And either way, just to mention what to do in the docs would be really helpful.
Thanks again.
| Thanks @danlester.
For now the integration lab -> retro is this icon in the notebook toolbar:

Maybe there should be something similar to the "Launch Classic Notebook", but for Retro?

> So it would be great if at least /retro and /retro/ can redirect to /retro/tree
Agree it would be great to have :+1:
Thanks for your thoughts.
I just discovered the button in the notebook toolbar!
Yes, would be good if there was also a link to the retro tree somewhere before you've actually opened a notebook.
It would probably require having a similar plugin as the one in JupyterLab:
https://github.com/jupyterlab/jupyterlab/blob/b8725f0ed99b199c535caba6898f5771832e9da9/packages/help-extension/src/index.tsx#L166-L200
That would live in https://github.com/jtpio/retrolab/blob/main/packages/lab-extension/src/index.ts
In case you would like to give it a shot and open a PR :) | 2021-05-27T11:01:20 | 0.0 | [] | [] |
||
tsutaj/statements-manager | tsutaj__statements-manager-132 | 8c739aa783ef0f5411c04ae2d4dad4bf96f22b62 | diff --git a/statements_manager/main.py b/statements_manager/main.py
index 0692b6c..ef4646b 100644
--- a/statements_manager/main.py
+++ b/statements_manager/main.py
@@ -89,7 +89,7 @@ def get_parser() -> argparse.ArgumentParser:
"creds_path",
help="path to credentials file (json)\n"
"how to create credentials file: "
- "see https://github.com/tsutaj/statements-manager/blob/master/README.md#how-to-use",
+ "see https://statements-manager.readthedocs.io/ja/stable/register_credentials.html",
)
return parser
diff --git a/statements_manager/src/manager.py b/statements_manager/src/manager.py
index 73a4853..ea93c6b 100644
--- a/statements_manager/src/manager.py
+++ b/statements_manager/src/manager.py
@@ -115,7 +115,7 @@ def get_docs_contents(self, problem_id: str) -> Tuple[ContentsStatus, str]:
logger.warning(
"tips: try 'ss-manager reg-creds' before running on docs mode.\n"
"how to create credentials file: "
- "see https://github.com/tsutaj/statements-manager/blob/master/README.md#how-to-use"
+ "see https://statements-manager.readthedocs.io/ja/stable/register_credentials.html"
)
return (ContentsStatus.NG, "")
| The reference link shown for errors when processing statements that use Google Docs is outdated
When rendering a problem statement that uses Google Docs, an error can sometimes occur, and a helpful reference link is printed along with it; that link is outdated and should be fixed.
| 2023-06-17T21:49:48 | 0.0 | [] | [] |
|||
googleapis/python-db-dtypes-pandas | googleapis__python-db-dtypes-pandas-238 | 87484cd4ecdc3aa33d1786198ae76547a3f1fb9b | diff --git a/noxfile.py b/noxfile.py
index 36c6554..102670a 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -201,13 +201,16 @@ def prerelease(session, tests_path):
"--upgrade",
"pyarrow",
)
+ # Avoid pandas==2.2.0rc0 as this version causes PyArrow to fail. Once newer
+ # prerelease comes out, this constraint can be removed. See
+ # https://github.com/googleapis/python-db-dtypes-pandas/issues/234
session.install(
"--extra-index-url",
"https://pypi.anaconda.org/scipy-wheels-nightly/simple",
"--prefer-binary",
"--pre",
"--upgrade",
- "pandas",
+ "pandas!=2.2.0rc0",
)
session.install(
"mock",
diff --git a/owlbot.py b/owlbot.py
index 4b89096..d1b3c08 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -109,13 +109,16 @@ def prerelease(session, tests_path):
"--upgrade",
"pyarrow",
)
+ # Avoid pandas==2.2.0rc0 as this version causes PyArrow to fail. Once newer
+ # prerelease comes out, this constraint can be removed. See
+ # https://github.com/googleapis/python-db-dtypes-pandas/issues/234
session.install(
"--extra-index-url",
"https://pypi.anaconda.org/scipy-wheels-nightly/simple",
"--prefer-binary",
"--pre",
"--upgrade",
- "pandas",
+ "pandas!=2.2.0rc0",
)
session.install(
"mock",
| tests.unit.test_arrow: many tests failed
Many tests failed at the same time in this package.
* I will close this issue when there are no more failures in this package _and_
there is at least one pass.
* No new issues will be filed for this package until this issue is closed.
* If there are already issues for individual test cases, I will close them when
the corresponding test passes. You can close them earlier, if you prefer, and
I won't reopen them while this issue is still open.
Here are the tests that failed:
* test_series_from_arrow[expected0-pyarrow_array0]
* test_series_from_arrow[expected1-pyarrow_array1]
* test_series_from_arrow[expected2-pyarrow_array2]
* test_series_from_arrow[expected3-pyarrow_array3]
* test_series_from_arrow[expected4-pyarrow_array4]
* test_series_from_arrow[expected5-pyarrow_array5]
* test_series_from_arrow[expected6-pyarrow_array6]
* test_series_from_arrow[expected7-pyarrow_array7]
* test_series_from_arrow[expected8-pyarrow_array8]
* test_series_from_arrow[expected9-pyarrow_array9]
* test_series_from_arrow[expected10-pyarrow_array10]
* test_series_from_arrow[expected11-pyarrow_array11]
* test_series_from_arrow[expected12-pyarrow_array12]
* test_series_from_arrow[expected13-pyarrow_array13]
* test_series_from_arrow[expected14-pyarrow_array14]
* test_series_from_arrow[expected15-pyarrow_array15]
* test_series_from_arrow[expected16-pyarrow_array16]
* test_series_from_arrow[expected17-pyarrow_array17]
* test_series_from_arrow[time-nanoseconds-arrow-round-trip]
* test_series_from_arrow[time-nanoseconds-arrow-from-string] (#120)
* test_dataframe_from_arrow
-----
commit: 12156f4b2560aeae15e299307e871146c79efc38
buildURL: [Build Status](https://source.cloud.google.com/results/invocations/aa201988-b9df-4c1e-9744-d0a3a22fae1b), [Sponge](http://sponge2/aa201988-b9df-4c1e-9744-d0a3a22fae1b)
status: failed
| Looks like this issue is flaky. :worried:
I'm going to leave this open and stop commenting.
A human should fix and close this.
---
When run at the same commit (12156f4b2560aeae15e299307e871146c79efc38), this test passed in one build ([Build Status](https://source.cloud.google.com/results/invocations/aa201988-b9df-4c1e-9744-d0a3a22fae1b), [Sponge](http://sponge2/aa201988-b9df-4c1e-9744-d0a3a22fae1b)) and failed in another build ([Build Status](https://source.cloud.google.com/results/invocations/aa201988-b9df-4c1e-9744-d0a3a22fae1b), [Sponge](http://sponge2/aa201988-b9df-4c1e-9744-d0a3a22fae1b)).
This has something to do with a pandas release candidate `pandas==2.2.0rc0`. In this release candidate, `make_block()` is [deprecated](https://pandas.pydata.org/pandas-docs/version/2.2.0rc0/whatsnew/v2.2.0.html#other-deprecations). It got quickly [reverted](https://github.com/pandas-dev/pandas/pull/56481) for the same issue we are encountering here, as pyarrow is still using this method. We will keep this issue open, wait for the newer pre-release version of pandas to come out, and close this issue when the tests stop failing.
@Linchin Rather than sitting on this and waiting for pandas to change their release candidate (`rc`): I would suggest we update our install instructions to not use this particular `rc` during testing so that this issue can be closed.
* `pandas==2.2.0rc0`
I would be very specific in pinpointing this version versus some sort of greater than equality check so that we don't accidentally prevent it from trying new versions when `2.2.0rc1` OR higher rolls out.
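Below is a small illustration (not from the thread) of that point, using the `packaging` library's specifier semantics: excluding exactly `2.2.0rc0` still lets newer prereleases and the final release through:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("!=2.2.0rc0", prereleases=True)
print("2.2.0rc0" in spec)  # False - the broken release candidate is skipped
print("2.2.0rc1" in spec)  # True  - a newer prerelease would still be tested
print("2.2.0" in spec)     # True  - the final release is unaffected
```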
That's a great idea! I'll do this | 2024-01-19T19:33:32 | 0.0 | [] | [] |
||
elimuinformatics/vcf2fhir | elimuinformatics__vcf2fhir-55 | 672337ea53e28b3b4f53a088263d59bfaab2db24 | diff --git a/vcf2fhir/common.py b/vcf2fhir/common.py
index 9df8239..0027b8f 100644
--- a/vcf2fhir/common.py
+++ b/vcf2fhir/common.py
@@ -4,6 +4,7 @@
import pytz
import logging
import re
+from collections import OrderedDict
general_logger = logging.getLogger("vcf2fhir.general")
@@ -139,3 +140,11 @@ def _error_log_allelicstate(record):
"Cannot Determine AllelicState for: %s , considered sample: %s",
record,
record.samples[0].data)
+
+
+def createOrderedDict(value_from, order):
+ value_to = OrderedDict()
+ for key in order:
+ if key in value_from.keys():
+ value_to[key] = value_from[key]
+ return value_to
diff --git a/vcf2fhir/fhir_helper.py b/vcf2fhir/fhir_helper.py
index 4089a26..87f9714 100644
--- a/vcf2fhir/fhir_helper.py
+++ b/vcf2fhir/fhir_helper.py
@@ -9,10 +9,19 @@
import fhirclient.models.fhirdate as date
import fhirclient.models.range as valRange
import fhirclient.models.medicationstatement as medication
-from collections import OrderedDict
+import numpy as np
from uuid import uuid4
from .common import *
+CG_ORDER = ["system", "code"]
+CODE_ORD = ["system", "code", "display"]
+RS_ORDER = ['resourceType', 'id', 'meta', 'status', 'category', 'code',
+ 'subject', 'component']
+DV_ORDER = ['resourceType', 'id', 'meta', 'status', 'category', 'code',
+ 'subject', 'valueCodeableConcept', 'component']
+SID_ORDER = ['resourceType', 'id', 'meta', 'status', 'category',
+ 'code', 'subject', 'valueCodeableConcept', 'derivedFrom']
+
class _Fhir_Helper:
def __init__(self, patientID):
@@ -459,11 +468,8 @@ def generate_final_json(self):
od["result"] = response['result']
else:
od["result"] = []
- od_code_coding = OrderedDict()
- od_code_coding["system"] = od["code"]["coding"][0]["system"]
- od_code_coding["code"] = od["code"]["coding"][0]["code"]
- od_code_coding["display"] = od["code"]["coding"][0]["display"]
- od["code"]["coding"][0] = od_code_coding
+ od['code']['coding'][0] =\
+ createOrderedDict(od['code']['coding'][0], CODE_ORD)
sidIndex = 0
for index, fhirReport in enumerate(od['contained']):
@@ -487,110 +493,40 @@ def generate_final_json(self):
fhirReport['derivedFrom'] = derivedFrom
for k, i in enumerate(od['contained']):
+ od_contained_k = od['contained'][k]
+ v_c_c = 'valueCodeableConcept'
+
if (i['category'][0]['coding'][0]):
- od_category_coding = OrderedDict()
- temp = i['category'][0]['coding'][0]["system"]
- od_category_coding["system"] = temp
- temp = i['category'][0]['coding'][0]["code"]
- od_category_coding["code"] = temp
- temp = od_category_coding
- od['contained'][k]['category'][0]['coding'][0] = temp
+ od_contained_k['category'][0]['coding'][0] =\
+ createOrderedDict(i['category'][0]['coding'][0], CG_ORDER)
if (i['code']['coding'][0]):
- od_code_coding = OrderedDict()
- od_code_coding["system"] = i['code']['coding'][0]["system"]
- od_code_coding["code"] = i['code']['coding'][0]["code"]
- od_code_coding["display"] = i['code']['coding'][0]["display"]
- od['contained'][k]['code']['coding'][0] = od_code_coding
+ od_contained_k['code']['coding'][0] =\
+ createOrderedDict(i['code']['coding'][0], CODE_ORD)
- if 'valueCodeableConcept' in i.keys():
- od_value_codeable_concept_coding = OrderedDict()
- temp = i['valueCodeableConcept']['coding'][0]["system"]
- od_value_codeable_concept_coding["system"] = temp
- temp = i['valueCodeableConcept']['coding'][0]["code"]
- od_value_codeable_concept_coding["code"] = temp
- temp = i['valueCodeableConcept']['coding'][0]["display"]
- od_value_codeable_concept_coding["display"] = temp
- temp = od_value_codeable_concept_coding
- od['contained'][k]['valueCodeableConcept']['coding'][0] = temp
+ if v_c_c in i.keys():
+ od_contained_k[v_c_c]['coding'][0] =\
+ createOrderedDict(i[v_c_c]['coding'][0], CODE_ORD)
if ((i['id'].startswith('dv-')) or (i['id'].startswith('rs-'))):
for q, j in enumerate(i['component']):
- od_component_code_coding = OrderedDict()
- if j['code']['coding'][0]["system"]:
- temp = j['code']['coding'][0]["system"]
- od_component_code_coding["system"] = temp
- if j['code']['coding'][0]["code"]:
- temp = j['code']['coding'][0]["code"]
- od_component_code_coding["code"] = temp
- if j['code']['coding'][0]["display"]:
- temp = j['code']['coding'][0]["display"]
- od_component_code_coding["display"] = temp
- if od['contained'][k]['component'][q]['code']['coding'][0]:
- temp = od_component_code_coding
- s1 = 'contained'
- s2 = 'component'
- od[s1][k][s2][q]['code']['coding'][0] = temp
+ od_contained_k_component_q = od_contained_k['component'][q]
+ if od_contained_k_component_q['code']['coding'][0]:
+ od_contained_k_component_q['code']['coding'][0] =\
+ createOrderedDict(j['code']['coding'][0], CODE_ORD)
- od_componentvalue_codeable_concept = OrderedDict()
- if 'valueCodeableConcept' in j.keys():
- temp = j['valueCodeableConcept']['coding'][0]["system"]
- od_componentvalue_codeable_concept["system"] = temp
- if 'code' in j['valueCodeableConcept']['coding'][0]\
- .keys(
- ):
- t = j['valueCodeableConcept']['coding'][0]["code"]
- od_componentvalue_codeable_concept["code"] = t
- if 'display' in j['valueCodeableConcept']['coding'][0]\
- .keys(
- ):
- s1 = 'valueCodeableConcept'
- s2 = 'display'
- temp = j[s1]['coding'][0]["display"]
- od_componentvalue_codeable_concept[s2] = temp
- s1 = 'contained'
- s2 = 'component'
- s3 = 'valueCodeableConcept'
- temp = od_componentvalue_codeable_concept
- od[s1][k][s2][q][s3]['coding'][0] = temp
+ if v_c_c in j.keys():
+ od_contained_k_component_q[v_c_c]['coding'][0] =\
+ createOrderedDict(j[v_c_c]['coding'][0], CODE_ORD)
if (i['id'].startswith('rs-')):
- od_RS = OrderedDict()
- od_RS["resourceType"] = i['resourceType']
- od_RS["id"] = i['id']
- od_RS["meta"] = i['meta']
- od_RS["status"] = i['status']
- od_RS["category"] = i['category']
- od_RS["code"] = i['code']
- od_RS["subject"] = i['subject']
- od_RS["component"] = i['component']
- od['contained'][k] = od_RS
+ od['contained'][k] = createOrderedDict(i, RS_ORDER)
if (i['id'].startswith('dv-')):
- od_DV = OrderedDict()
- od_DV["resourceType"] = i['resourceType']
- od_DV["id"] = i['id']
- od_DV["meta"] = i['meta']
- od_DV["status"] = i['status']
- od_DV["category"] = i['category']
- od_DV["code"] = i['code']
- od_DV["subject"] = i['subject']
- od_DV["valueCodeableConcept"] = i['valueCodeableConcept']
- od_DV["component"] = i['component']
- od['contained'][k] = od_DV
+ od['contained'][k] = createOrderedDict(i, DV_ORDER)
if (i['id'].startswith('sid-')):
- od_SID = OrderedDict()
- od_SID["resourceType"] = i['resourceType']
- od_SID["id"] = i['id']
- od_SID["meta"] = i['meta']
- od_SID["status"] = i['status']
- od_SID["category"] = i['category']
- od_SID["code"] = i['code']
- od_SID["subject"] = i['subject']
- od_SID["valueCodeableConcept"] = i['valueCodeableConcept']
- od_SID["derivedFrom"] = i['derivedFrom']
- od['contained'][k] = od_SID
+ od['contained'][k] = createOrderedDict(i, SID_ORDER)
self.fhir_json = od
def export_fhir_json(self, output_filename):
| Simplifying the code
## Prerequisites
- [x] I am running the latest version
- [X] I checked the documentation and found no answer
- [X] I checked to make sure that this issue has not already been filed
## Context
* Package Version: Latest merge
* Operating System: Windows 10
## Current Behavior & Expected Behavior
Example : In fhir_helper.py (line 564)
```python
temp = j['valueCodeableConcept']['coding'][0]["system"]
od_componentvalue_codeable_concept["system"] = temp
```
it can be converted into this
```python
od_componentvalue_codeable_concept["system"] = j['valueCodeableConcept']['coding'][0]["system"]
```
and the same applies to other lines of code like these across the project
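Below is a short sketch of the direction the patch above takes: the repeated key-by-key copying collapses into the `createOrderedDict` helper (the sample `coding` dict is illustrative only):

```python
from collections import OrderedDict

CODE_ORD = ["system", "code", "display"]

def createOrderedDict(value_from, order):
    # copy only the keys named in `order`, preserving that order
    value_to = OrderedDict()
    for key in order:
        if key in value_from.keys():
            value_to[key] = value_from[key]
    return value_to

# illustrative input; replaces the temp-variable copies shown above
coding = {"system": "http://example.org", "code": "1234-5", "display": "example display"}
od_code_coding = createOrderedDict(coding, CODE_ORD)
```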
| @rhdolin @srgothi92 please tell me if this cannot be considered as an issue or if there something relate to this code that I am not aware of
@abhishek-jain-1999, the code was `od_componentvalue_codeable_concept["system"] = j['valueCodeableConcept']['coding'][0]["system"]` before and I changed it to
`temp = j['valueCodeableConcept']['coding'][0]["system"]`
`od_componentvalue_codeable_concept["system"] = temp`
because according to **PEP 8** we can only have 79 characters in a line.
@Rohan-cod okay, now I understand why those changes were made, but don't you think it is wasteful to create a temp variable in memory only to hand its value off on the next line and then keep it around until it is discarded?
In short, there is double memory usage and unnecessary time spent, and this is done in multiple places.
Also, as a reader of the code it is confusing to see a variable created and not used later,
like for example
```python
if 'code' in j['valueCodeableConcept']['coding'][0]\
.keys(
):
t = j['valueCodeableConcept']['coding'][0]["code"]
od_componentvalue_codeable_concept["code"] = t
```
this looks much simpler to understand
```python
if 'code' in j['valueCodeableConcept']['coding'][0].keys():
od_componentvalue_codeable_concept["code"] = j['valueCodeableConcept']['coding'][0]["code"]
```
You are absolutely correct @abhishek-jain-1999. If you have any other way to conform to the rule of **having no more than 79 characters in a single line** you can do that, but using `od_componentvalue_codeable_concept["code"] = j['valueCodeableConcept']['coding'][0]["code"]` will cause the checks to fail.
@Rohan-cod Thanks for your response. Let me see if I can find another way to handle this issue.
No problem @abhishek-jain-1999. All the best!
@abhishek-jain-1999, I figured out a way to do this. I missed it while I was changing the code.
Changing the code from this:
```python
if 'code' in j['valueCodeableConcept']['coding'][0]\
.keys(
):
t = j['valueCodeableConcept']['coding'][0]["code"]
od_componentvalue_codeable_concept["code"] = t
```
to this:
```python
if 'code' in j['valueCodeableConcept']['coding'][0]\
.keys(
):
od_componentvalue_codeable_concept["code"] =\
j['valueCodeableConcept']['coding'][0]["code"]
```
and doing the same at other locations will help you remove most of the temporary variables.
Just an option. Completely up to you, if you want you can use my approach.
Nice work everyone, I really like the conversation that is happening in each issues and PR.
Contributors Cheers 🍷
> Nice work everyone, I really like the conversation that is happening in each issues and PR.
>
> Contributors Cheers 🍷
Cheers @srgothi92 | 2021-04-09T19:59:39 | 0.0 | [] | []
||
EnableSecurity/wafw00f | EnableSecurity__wafw00f-125 | 093195c6f19c2ac656f53142a0a966677efa8826 | diff --git a/wafw00f/main.py b/wafw00f/main.py
old mode 100755
new mode 100644
index d36da6a5..37e14463
--- a/wafw00f/main.py
+++ b/wafw00f/main.py
@@ -137,20 +137,22 @@ def genericdetect(self):
return True
# Checking for the Server header after sending malicious requests
+ normalserver, attackresponse_server = '', ''
response = self.attackres
- normalserver = resp1.headers.get('Server')
- attackresponse_server = response.headers.get('Server')
- if attackresponse_server:
- if attackresponse_server != normalserver:
- self.log.info('Server header changed, WAF possibly detected')
- self.log.debug('Attack response: %s' % attackresponse_server)
- self.log.debug('Normal response: %s' % normalserver)
- reason = reasons[1]
- reason += '\r\nThe server header for a normal response is "%s",' % normalserver
- reason += ' while the server header a response to an attack is "%s",' % attackresponse_server
- self.knowledge['generic']['reason'] = reason
- self.knowledge['generic']['found'] = True
- return True
+ if 'server' in resp1.headers:
+ normalserver = resp1.headers.get('Server')
+ if 'server' in response.headers:
+ attackresponse_server = response.headers.get('Server')
+ if attackresponse_server != normalserver:
+ self.log.info('Server header changed, WAF possibly detected')
+ self.log.debug('Attack response: %s' % attackresponse_server)
+ self.log.debug('Normal response: %s' % normalserver)
+ reason = reasons[1]
+ reason += '\r\nThe server header for a normal response is "%s",' % normalserver
+ reason += ' while the server header a response to an attack is "%s",' % attackresponse_server
+ self.knowledge['generic']['reason'] = reason
+ self.knowledge['generic']['found'] = True
+ return True
# If at all request doesn't go, press F
except RequestBlocked:
@@ -340,7 +342,7 @@ def main():
try:
m = [i.replace(')', '').split(' (') for i in wafdetectionsprio]
print(R+' WAF Name'+' '*24+'Manufacturer\n '+'-'*8+' '*24+'-'*12+'\n')
- max_len = max(len(str(x)) for k in m for x in k)
+ max_len = max(len(str(x)) for k in m for x in k)
for inner in m:
first = True
for elem in inner:
@@ -382,7 +384,7 @@ def main():
elif options.input.endswith('.csv'):
columns = defaultdict(list)
with open(options.input) as f:
- reader = csv.DictReader(f)
+ reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
columns[k].append(v)
@@ -462,7 +464,7 @@ def main():
elif options.output.endswith('.csv'):
log.debug("Exporting data in csv format to file: %s" % (options.output))
with open(options.output, 'w') as outfile:
- csvwriter = csv.writer(outfile, delimiter=',', quotechar='"',
+ csvwriter = csv.writer(outfile, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
count = 0
for result in results:
| AttributeError: 'NoneType' object has no attribute 'headers'
`$ wafw00f http://balancepayout.paypal.com `
```
Traceback (most recent call last):
File "/usr/local/bin/wafw00f", line 4, in <module>
__import__('pkg_resources').run_script('wafw00f==2.1.0', 'wafw00f')
File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 658, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 1438, in run_script
exec(code, namespace, namespace)
File "/usr/local/lib/python3.6/dist-packages/wafw00f-2.1.0-py3.6.egg/EGG-INFO/scripts/wafw00f", line 8, in <module>
main.main()
File "/usr/local/lib/python3.6/dist-packages/wafw00f-2.1.0-py3.6.egg/wafw00f/main.py", line 442, in main
if attacker.genericdetect():
File "/usr/local/lib/python3.6/dist-packages/wafw00f-2.1.0-py3.6.egg/wafw00f/main.py", line 142, in genericdetect
attackresponse_server = response.headers.get('Server')
AttributeError: 'NoneType' object has no attribute 'headers'
```
| Verified that is a bug. Will shortly push a fix to this. | 2021-01-22T16:04:12 | 0.0 | [] | []
||
augerai/a2ml | augerai__a2ml-515 | 2392fb65db622fd6c204f132ee5a63b41aae1a0e | diff --git a/a2ml/api/a2ml.py b/a2ml/api/a2ml.py
index b9fbf172..c92955a7 100644
--- a/a2ml/api/a2ml.py
+++ b/a2ml/api/a2ml.py
@@ -304,7 +304,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
- Iris-setosa
* - Iris-virginica
- Iris-virginica
- * It may also contain train features to retrain while Review
+ * It may also contain train features to retrain while Review(if target missed) and for distribution chart
This method support only one provider
diff --git a/a2ml/api/a2ml_model.py b/a2ml/api/a2ml_model.py
index a79abb71..ed58746d 100644
--- a/a2ml/api/a2ml_model.py
+++ b/a2ml/api/a2ml_model.py
@@ -149,7 +149,7 @@ def actuals(self, model_id, filename=None, data=None, columns=None, actuals_at=N
- Iris-setosa
* - Iris-virginica
- Iris-virginica
- * It may also contain train features to retrain while Review
+ * It may also contain train features to retrain while Review(if target missed) and for distribution chart
This method support only one provider
diff --git a/setup.py b/setup.py
index 22feb59b..bdedc7de 100644
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ def run(self):
'smart_open==1.9.0', # version for azure
'jsonpickle',
'websockets',
- 'liac-arff',
+ 'liac-arff==2.4.0',
'xlrd==1.2.0'
]
| WIP: Move api to auger.ai repo
Moving all underlying auger api code to auger.ai repo
| 2021-01-26T16:23:07 | 0.0 | [] | [] |
|||
googleapis/python-logging | googleapis__python-logging-848 | 1216cf61b161ed10281842242b711a7b95fea675 | diff --git a/README.rst b/README.rst
index 2618dc37a..84dd1e77f 100644
--- a/README.rst
+++ b/README.rst
@@ -61,8 +61,8 @@ Python >= 3.7
Unsupported Python Versions
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Python == 2.7. The last version of the library compatible with Python 2.7 is `google-cloud-logging==1.15.1`.
-Python == 3.6. The last version of the library compatible with Python 3.6 is `google-cloud-logging==3.1.2`.
+| Python == 2.7. The last version of the library compatible with Python 2.7 is ``google-cloud-logging==1.15.1``.
+| Python == 3.6. The last version of the library compatible with Python 3.6 is ``google-cloud-logging==3.1.2``.
Mac/Linux
diff --git a/docs/std-lib-integration.rst b/docs/std-lib-integration.rst
index a485fce6d..be43231fd 100644
--- a/docs/std-lib-integration.rst
+++ b/docs/std-lib-integration.rst
@@ -44,6 +44,16 @@ There are two supported handler classes to choose from:
to standard out, to be read and parsed by a GCP logging agent
- This is the default handler on Kubernetes Engine, Cloud Functions and Cloud Run
+Handler classes can also be specified via `dictConfig <https://docs.python.org/3/library/logging.config.html#logging-config-dictschema>`_:
+
+.. literalinclude:: ../samples/snippets/usage_guide.py
+ :start-after: [START logging_dict_config]
+ :end-before: [END logging_dict_config]
+ :dedent: 4
+
+Note that since :class:`~google.cloud.logging_v2.handlers.handlers.CloudLoggingHandler` requires an already initialized :class:`~google.cloud.logging_v2.client.Client`,
+you must initialize a client and include it in the dictConfig entry for a `CloudLoggingHandler`.
+
Standard Library
---------------------------
@@ -101,8 +111,7 @@ The following fields are currently supported:
- :ref:`json_fields<JSON>`
.. note::
- Fields marked with "*" require a supported Python web framework. The Google Cloud Logging
- library currently supports `flask <https://flask.palletsprojects.com/>`_ and `django <https://www.djangoproject.com/>`_
+ Fields marked with "*" require a :doc:`supported Python web framework </web-framework-integration>`.
Manual Metadata Using the `extra` Argument
--------------------------------------------
diff --git a/docs/usage.rst b/docs/usage.rst
index 929ee9cef..7541f355b 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -4,6 +4,7 @@ Usage Guide
:maxdepth: 2
std-lib-integration
+ web-framework-integration
direct-lib-usage
grpc-vs-http
diff --git a/docs/web-framework-integration.rst b/docs/web-framework-integration.rst
new file mode 100644
index 000000000..d91d714b3
--- /dev/null
+++ b/docs/web-framework-integration.rst
@@ -0,0 +1,32 @@
+Integration with Python Web Frameworks
+======================================
+
+The Google Cloud Logging library can integrate with Python web frameworks
+`flask <https://flask.palletsprojects.com/>`_ and `django <https://www.djangoproject.com/>`_ to
+automatically populate `LogEntry fields <https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry>`_
+`trace`, `span_id`, `trace_sampled`, and `http_request`.
+
+Django
+------
+
+Django integration has been tested to work with each of the Django/Python versions listed `here <https://docs.djangoproject.com/en/stable/faq/install/#what-python-version-can-i-use-with-django>`_.
+To enable Django integration, add `google.cloud.logging_v2.handlers.middleware.RequestMiddleware` to the list of `MIDDLEWARE`
+in your `settings <https://docs.djangoproject.com/en/stable/topics/settings/>`_ file. Also be sure to :doc:`set up logging </std-lib-integration>` in your settings file.
+
+Flask
+-----
+
+Flask integration has been tested to work with the following versions of Flask:
+
+=============== ==============
+Python version Flask versions
+=============== ==============
+3.7 >=1.0.0
+3.8 >=1.0.0
+3.9 >=1.0.0
+3.10 >=1.0.3
+3.11 >=1.0.3
+3.12 >=1.0.3
+=============== ==============
+
+Be sure to :doc:`set up logging </std-lib-integration>` before declaring the Flask app.
diff --git a/google/cloud/logging_v2/handlers/_helpers.py b/google/cloud/logging_v2/handlers/_helpers.py
index 43678ed0d..f0c301ceb 100644
--- a/google/cloud/logging_v2/handlers/_helpers.py
+++ b/google/cloud/logging_v2/handlers/_helpers.py
@@ -66,7 +66,7 @@ def get_request_data_from_flask():
Returns:
Tuple[Optional[dict], Optional[str], Optional[str], bool]:
Data related to the current http request, trace_id, span_id and trace_sampled
- for the request. All fields will be None if a django request isn't found.
+ for the request. All fields will be None if a Flask request isn't found.
"""
if flask is None or not flask.request:
return None, None, None, False
diff --git a/samples/snippets/usage_guide.py b/samples/snippets/usage_guide.py
index 5c9e86990..f4292a9de 100644
--- a/samples/snippets/usage_guide.py
+++ b/samples/snippets/usage_guide.py
@@ -484,6 +484,37 @@ def setup_logging(client):
# [END setup_logging_excludes]
+@snippet
+def logging_dict_config(client):
+ import logging.config
+
+ # [START logging_dict_config]
+ import google.cloud.logging
+
+ client = google.cloud.logging.Client()
+
+ LOGGING = {
+ "version": 1,
+ "handlers": {
+ "cloud_logging": {
+ "class": "google.cloud.logging.handlers.CloudLoggingHandler",
+ "client": client,
+ },
+ "structured_log": {
+ "class": "google.cloud.logging.handlers.StructuredLogHandler"
+ },
+ },
+ "root": {"handlers": ["console"], "level": "WARNING"},
+ "loggers": {
+ "my_logger": {"handlers": ["cloud_logging"], "level": "INFO"},
+ "my_other_logger": {"handlers": ["structured_log"], "level": "INFO"},
+ },
+ }
+ # [END logging_dict_config]
+
+ logging.config.dictConfig(LOGGING)
+
+
def _line_no(func):
return func.__code__.co_firstlineno
| document django middleware
The django middleware isn't documented anywhere as far as I can see.
It needs to be documented here: https://cloud.google.com/logging/docs/setup/python#write_logs_with_the_standard_python_logging_handler
There are lots of people reporting that the logs don't have any http info, and this could certainly be a root cause.
Perhaps it was documented at one point? See this issue here:
https://github.com/googleapis/python-logging/issues/677
Or maybe we're all just looking through the source.
Thanks.
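Below is a minimal sketch of what that documentation boils down to; the middleware path is taken from the docs added in the patch above, and the rest assumes the standard `google-cloud-logging` client API:

```python
# settings.py (sketch)
import google.cloud.logging

# Send standard-library logging records to Cloud Logging
client = google.cloud.logging.Client()
client.setup_logging()

MIDDLEWARE = [
    # Captures the current request so trace/span/http_request get populated
    "google.cloud.logging_v2.handlers.middleware.RequestMiddleware",
    # ... the rest of your middleware ...
]
```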
| @maclek I am currently working on documenting Django integrations. Is there anything else that you have found that needs to be done in order to get them to work, other than adding the middleware to `settings.py`? | 2024-02-01T20:05:09 | 0.0 | [] | []
||
bo4e/BO4E-python | bo4e__BO4E-python-502 | b5597da4f01fa4549dba6cdcdbe7bc54d39dcf72 | diff --git a/src/bo4e/bo/messlokation.py b/src/bo4e/bo/messlokation.py
index bcbbdb12e..4bee7da4e 100644
--- a/src/bo4e/bo/messlokation.py
+++ b/src/bo4e/bo/messlokation.py
@@ -3,7 +3,7 @@
and corresponding marshmallow schema for de-/serialization
"""
import re
-from typing import Annotated, List, Optional
+from typing import Annotated, Any, List, Optional
from iso3166 import countries
from pydantic import Field, field_validator, model_validator
@@ -118,10 +118,14 @@ def _validate_messlokations_id_country_code(cls, messlokations_id: str) -> str:
"Checks that if an address is given, that there is only one valid address given"
# pylint: disable=no-self-argument
- @model_validator(mode="after") # type:ignore[arg-type]
+ @model_validator(mode="before")
@classmethod
- def validate_grundzustaendiger_x_codenr(cls, model: "Messlokation") -> "Messlokation":
+ def validate_grundzustaendiger_x_codenr(cls, data: Any) -> dict[str, Any]:
"""Checks that if a codenr is given, that there is only one valid codenr given."""
- if model.grundzustaendiger_msb_codenr is not None and model.grundzustaendiger_msbim_codenr is not None:
+ assert isinstance(data, dict), "data is not a dict"
+ if (
+ data.get("grundzustaendiger_msb_codenr", None) is not None
+ and data.get("grundzustaendiger_msbim_codenr", None) is not None
+ ):
raise ValueError("More than one codenr is given.")
- return model
+ return data
diff --git a/src/bo4e/validators.py b/src/bo4e/validators.py
index eec866803..14fe3e9b2 100644
--- a/src/bo4e/validators.py
+++ b/src/bo4e/validators.py
@@ -45,15 +45,15 @@ def combinations_of_fields(
def supplied(value: Any) -> bool:
return value is not None and (not isinstance(value, str) or value != "")
- def validator(self: ModelT) -> ModelT:
- bools = tuple(int(supplied(getattr(self, field))) for field in fields)
+ def validator(cls: type[ModelT], data: dict[str, Any]) -> dict[str, Any]:
+ bools = tuple(int(supplied(data.get(field, None))) for field in fields)
if bools in valid_combinations:
- return self
+ return data
if custom_error_message:
raise ValueError(custom_error_message)
- raise ValueError(f"Invalid combination of fields {fields} for {self!r}: {bools}")
+ raise ValueError(f"Invalid combination of fields {fields} for {cls!r}: {bools}")
- return model_validator(mode="after")(validator)
+ return model_validator(mode="before")(validator)
# pylint:disable=unused-argument
| Change `model_validator(mode="after")` to `model_validator(mode="before")`
This is useful for projects using this library, especially for unit tests, if you are using `model_construct` a lot. This is actually due to a bug (or unintended side effect), see here https://github.com/pydantic/pydantic/issues/6978.
For instance, this helps to resolve issues in PR https://github.com/Hochfrequenz/powercloud2lynqtech/pull/1816.
Otherwise, you would have to use `model_construct` on every nested structure.
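Below is a small sketch (with a hypothetical model, not taken from the library) of the requested pattern: a `mode="before"` validator receives the raw input dict, which, per the issue above, avoids the unintended re-validation that `mode="after"` triggers when nested structures are built with `model_construct`:

```python
from typing import Any, Optional
from pydantic import BaseModel, model_validator

class ExampleLokation(BaseModel):  # hypothetical stand-in for a BO4E model
    msb_codenr: Optional[str] = None
    msbim_codenr: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_only_one_codenr(cls, data: Any) -> dict[str, Any]:
        # the raw input is expected to be a dict here, as in the patch
        assert isinstance(data, dict), "data is not a dict"
        if data.get("msb_codenr") is not None and data.get("msbim_codenr") is not None:
            raise ValueError("More than one codenr is given.")
        return data
```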
| 2023-08-07T21:39:21 | 0.0 | [] | [] |
|||
learnables/learn2learn | learnables__learn2learn-403 | 39db32f25b91778beceee19624f8f98709deb78e | diff --git a/CHANGELOG.md b/CHANGELOG.md
index e4dee544..4d21350d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* Example for `detach_module`. ([Nimish Sanghi](https://github.com/nsanghi))
* Loading duplicate FGVC Aircraft images.
* Move vision datasets to Zenodo. (mini-ImageNet, tiered-ImageNet, FC100, CIFAR-FS)
+* mini-ImageNet targets are now ints (not np.float64).
## v0.1.7
diff --git a/learn2learn/vision/datasets/cifarfs.py b/learn2learn/vision/datasets/cifarfs.py
index 3eea95a7..177ce5f2 100644
--- a/learn2learn/vision/datasets/cifarfs.py
+++ b/learn2learn/vision/datasets/cifarfs.py
@@ -95,7 +95,7 @@ def _download(self):
with zipfile.ZipFile(zip_file, 'r') as zfile:
zfile.extractall(self.raw_path)
os.remove(zip_file)
- except:
+ except Exception:
download_file_from_google_drive('1pTsCCMDj45kzFYgrnO67BWVbKs48Q3NI',
zip_file)
with zipfile.ZipFile(zip_file, 'r') as zfile:
diff --git a/learn2learn/vision/datasets/fc100.py b/learn2learn/vision/datasets/fc100.py
index 325ba3f0..abc7b543 100644
--- a/learn2learn/vision/datasets/fc100.py
+++ b/learn2learn/vision/datasets/fc100.py
@@ -93,7 +93,7 @@ def download(self):
archive_file = zipfile.ZipFile(archive_path)
archive_file.extractall(self.root)
os.remove(archive_path)
- except:
+ except Exception:
try: # Download from Google Drive first
download_file_from_google_drive(FC100.GOOGLE_DRIVE_FILE_ID,
archive_path)
diff --git a/learn2learn/vision/datasets/mini_imagenet.py b/learn2learn/vision/datasets/mini_imagenet.py
index 8234e551..f039e2d9 100644
--- a/learn2learn/vision/datasets/mini_imagenet.py
+++ b/learn2learn/vision/datasets/mini_imagenet.py
@@ -109,7 +109,7 @@ def __init__(
download_file(dropbox_file_link, pickle_file)
with open(pickle_file, 'rb') as f:
self.data = pickle.load(f)
- except:
+ except Exception:
try:
if not self._check_exists() and download:
print('Downloading mini-ImageNet --', mode)
@@ -136,7 +136,7 @@ def __getitem__(self, idx):
data = self.x[idx]
if self.transform:
data = self.transform(data)
- return data, self.y[idx]
+ return data, int(self.y[idx])
def __len__(self):
return len(self.x)
diff --git a/learn2learn/vision/datasets/tiered_imagenet.py b/learn2learn/vision/datasets/tiered_imagenet.py
index 6a7ee691..c7a09419 100644
--- a/learn2learn/vision/datasets/tiered_imagenet.py
+++ b/learn2learn/vision/datasets/tiered_imagenet.py
@@ -105,7 +105,7 @@ def download(self, file_id, destination):
source=file_url,
destination=file_dest,
)
- except:
+ except Exception:
archive_path = os.path.join(destination, 'tiered_imagenet.tar')
download_file_from_google_drive(file_id, archive_path)
archive_file = tarfile.open(archive_path)
| Return pytorch tensor for mini-imagenet labels?
https://github.com/learnables/learn2learn/blob/06893e847693a0227d5f35a6e065e6161bb08201/learn2learn/vision/datasets/mini_imagenet.py#L111
Currently, when loading mini-imagenet, the inputs are returned as pytorch tensors while the labels are returned as numpy arrays. Since the user will likely use both in a training loop, does it make sense to cast the labels to long pytorch tensors?
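For context, a hedged sketch of what such a cast could look like inside the dataset's `__getitem__`. The attribute names `self.x`, `self.y`, and `self.transform` come from the patch above; note that the merged fix simply returns `int(self.y[idx])`, and casting to a long tensor as shown here is only one possible alternative.

```python
import torch

# Sketch of a MiniImagenet-style __getitem__ that returns the label as a long tensor.
def __getitem__(self, idx):
    data = self.x[idx]
    if self.transform:
        data = self.transform(data)
    label = torch.tensor(int(self.y[idx]), dtype=torch.long)
    return data, label
```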
| Thanks for spotting this and the docstring (#258) issue in mini-ImageNet @pietrolesci. Yes, both should be tensors.
In fact, let me piggy-back and mention one more issue: in the benchmarks, mini-ImageNet and tiered-ImageNet return samples with different input ranges, with `data_augmentation=None`. (Tiered is 0-1 but mini is 0-255.) Ideally, all vision benchmarks would have the same ranges and types by default, probably 0-255 uint8 for images.
Hi @seba-1511, thanks for picking this up and for your answer :) | 2023-05-29T10:25:46 | 0.0 | [] | [] |
||
nlpsuge/xsession-manager | nlpsuge__xsession-manager-33 | e6bf5a9fa279c9f4ef99c6ac672ea96d4175519a | diff --git a/xsession_manager/arguments_handler.py b/xsession_manager/arguments_handler.py
index b023809..95fc754 100755
--- a/xsession_manager/arguments_handler.py
+++ b/xsession_manager/arguments_handler.py
@@ -170,7 +170,7 @@ def handle_arguments(self):
try:
file_path = Path(constants.Locations.BASE_LOCATION_OF_SESSIONS, file)
with open(file_path, 'r') as f:
- num = num + 1
+ num += 1
namespace_objs: XSessionConfig = json.load(f, object_hook=lambda d: Namespace(**d))
print(str(num) +'. ' + namespace_objs.session_name,
namespace_objs.session_create_time,
@@ -201,7 +201,7 @@ def handle_arguments(self):
# Print data according to declared order
ordered_variables = vars(XSessionConfigObject)['__annotations__']
for x_session_config_object in x_session_config_objects:
- count = count + 1
+ count += 1
print('%d.' % count)
# Get fields in declared order
diff --git a/xsession_manager/xsession_manager.py b/xsession_manager/xsession_manager.py
index ed8777e..f4ead2f 100755
--- a/xsession_manager/xsession_manager.py
+++ b/xsession_manager/xsession_manager.py
@@ -233,7 +233,7 @@ def _move_windows_while_restore(self, session_name, x_session_config_objects_cop
while Gtk.events_pending():
Gtk.main_iteration()
- retry_count_down = retry_count_down - 1
+ retry_count_down -= 1
self._suppress_log_if_already_in_workspace = True
self.move_window(session_name)
@@ -281,7 +281,7 @@ def _restore_sessions(self,
running_restores.append(index)
is_running = True
with self.instance_lock:
- self.restore_app_countdown = self.restore_app_countdown - 1
+ self.restore_app_countdown -= 1
break
if is_running:
continue
| Convert four assignment statements to the usage of augmented operators
:eyes: Some source code analysis tools can help to find opportunities for improving software components.
:thought_balloon: I propose to [increase the usage of augmented assignment statements](https://docs.python.org/3/reference/simple_stmts.html#augmented-assignment-statements "Augmented assignment statements") accordingly.
```diff
diff --git a/xsession_manager/arguments_handler.py b/xsession_manager/arguments_handler.py
index b023809..95fc754 100755
--- a/xsession_manager/arguments_handler.py
+++ b/xsession_manager/arguments_handler.py
@@ -170,7 +170,7 @@ class ArgumentsHandler():
try:
file_path = Path(constants.Locations.BASE_LOCATION_OF_SESSIONS, file)
with open(file_path, 'r') as f:
- num = num + 1
+ num += 1
namespace_objs: XSessionConfig = json.load(f, object_hook=lambda d: Namespace(**d))
print(str(num) +'. ' + namespace_objs.session_name,
namespace_objs.session_create_time,
@@ -201,7 +201,7 @@ class ArgumentsHandler():
# Print data according to declared order
ordered_variables = vars(XSessionConfigObject)['__annotations__']
for x_session_config_object in x_session_config_objects:
- count = count + 1
+ count += 1
print('%d.' % count)
# Get fields in declared order
diff --git a/xsession_manager/xsession_manager.py b/xsession_manager/xsession_manager.py
index ed8777e..f4ead2f 100755
--- a/xsession_manager/xsession_manager.py
+++ b/xsession_manager/xsession_manager.py
@@ -233,7 +233,7 @@ class XSessionManager:
while Gtk.events_pending():
Gtk.main_iteration()
- retry_count_down = retry_count_down - 1
+ retry_count_down -= 1
self._suppress_log_if_already_in_workspace = True
self.move_window(session_name)
@@ -281,7 +281,7 @@ class XSessionManager:
running_restores.append(index)
is_running = True
with self.instance_lock:
- self.restore_app_countdown = self.restore_app_countdown - 1
+ self.restore_app_countdown -= 1
break
if is_running:
continue
```
| Hi,
Sorry for late reply.
Thanks for the suggestion.
Would you please submit a PR?
:thought_balloon: Can the chances grow to integrate the shown small source code adjustments also directly (with support from other contributors eventually)?
> :thought_balloon: Can the chances grow to integrate the shown small source code adjustments also directly (with support from other contributors eventually)?
@elfring
I think it is OK to merge your PR to the main branch directly. I'll test it before merging anyway.
Please correct me if I understand you wrongly.
Does this feedback mean that you would like to integrate the change suggestion directly (without an extra pull/merge request from me)?
@elfring No, I'd like you to commit a PR. :)
I'll merge your code later :) | 2022-08-07T13:15:01 | 0.0 | [] | []
||
adafruit/Adafruit_CircuitPython_SCD4X | adafruit__Adafruit_CircuitPython_SCD4X-12 | ea17ea7cddccf7452ce98f6628147bf06c4cf351 | diff --git a/adafruit_scd4x.py b/adafruit_scd4x.py
index 3f5bc32..22ed6b8 100644
--- a/adafruit_scd4x.py
+++ b/adafruit_scd4x.py
@@ -208,7 +208,7 @@ def data_ready(self):
"""Check the sensor to see if new data is available"""
self._send_command(_SCD4X_DATAREADY, cmd_delay=0.001)
self._read_reply(self._buffer, 3)
- return not ((self._buffer[0] & 0x03 == 0) and (self._buffer[1] == 0))
+ return not ((self._buffer[0] & 0x07 == 0) and (self._buffer[1] == 0))
@property
def serial_number(self):
| get_data_ready_status should watch 11 bits (not 10)
Section 3.8.2 of the SCD4x datasheet says to check the least significant 11 bits of the returned status. But the code on line 211 in "data_ready" only checks 8+2 = 10 bits. I've never ever seen that unchecked bit get set when data was not ready, but just in case it ever does -- we should AND the MSB with 7 (3 bits + 8) instead of 3 (2 bits + 8) to follow the datasheet.
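A standalone sketch of the bit arithmetic (not the library code): the 11-bit status field spans the full low byte plus the lowest 3 bits of the high byte, so the high byte must be masked with 0x07 rather than 0x03.

```python
def data_ready(buffer: bytes) -> bool:
    # 11 least-significant bits = lowest 3 bits of buffer[0] + all 8 bits of buffer[1]
    return not ((buffer[0] & 0x07) == 0 and buffer[1] == 0)


# With only bit 10 set, the 11-bit check reports ready; a 10-bit mask (0x03) would miss it.
assert data_ready(bytes([0b0000_0100, 0x00])) is True
assert data_ready(bytes([0b0000_0000, 0x00])) is False
```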
| That's a good catch. Thank you for the really direct references, it was really easy to look up and see. I tried going through the Sensirion-provided C code to see what they use, but C is not my specialty. I've got a sensor so I should be able to give it a shot regardless!
can y'all submit a PR?
Yup! I should have one in tonight | 2022-01-21T06:03:02 | 0.0 | [] | [] |
||
ContextLab/davos | ContextLab__davos-95 | 4a3be140f5b77e8e01cdc964bf07972a2c9afbd5 | diff --git a/davos/core/project.py b/davos/core/project.py
index a4b39207..f86cc65a 100644
--- a/davos/core/project.py
+++ b/davos/core/project.py
@@ -49,6 +49,7 @@
import os
import shutil
import sys
+import warnings
from os.path import expandvars
from pathlib import Path
from urllib.request import urlopen
@@ -202,7 +203,7 @@ def _refresh_installed_pkgs(self):
cmd = (
f'{config.pip_executable} list '
'--disable-pip-version-check '
- f'--path {self.site_packages_dir} '
+ f'--path "{self.site_packages_dir}" '
f'--format json'
)
pip_list_stdout = run_shell_command(cmd, live_stdout=False)
@@ -678,6 +679,15 @@ def get_notebook_path():
notebook_relpath = unquote(session['notebook']['path'])
return f'{nbserver_root_dir}/{notebook_relpath}'
+ # VS Code doesn't actually start a Jupyter server when connecting to
+ # kernels, so the Jupyter API won't work. Fortunately, it's easy to
+ # check if the notebook is being run through VS Code, and to get its
+ # absolute path, if so.
+ # environment variable defined only if running in VS Code
+ if os.getenv('VSCODE_PID') is not None:
+ # global variable that holds absolute path to notebook file
+ return config.ipython_shell.user_ns['__vsc_ipynb_file__']
+
# shouldn't ever get here, but just in case
raise RuntimeError("Could not find notebook path for current kernel")
@@ -855,10 +865,24 @@ def use_default_project():
if isinstance(config._ipython_shell, TerminalInteractiveShell):
proj_name = "ipython-shell"
else:
- proj_name = get_notebook_path()
+ try:
+ proj_name = get_notebook_path()
+ except RuntimeError:
+ # failed to identify the notebook's name/path for some
+ # reason. This may happen if the notebook is being run
+ # through an IDE or other application that accesses the
+ # notebook kernel in a non-standard way, such that the
+ # Jupyter server is never launched. In this case, fall back
+ # to a generic project so smuggled packages are still
+ # isolated from the user's main environment
+ proj_name = "davos-fallback"
+ warnings.warn(
+ "Failed to identify notebook path. Falling back to generic "
+ "default project"
+ )
# will always be an absolute path to a real Jupyter notebook file,
- # or name of real Colab notebook, so we can skip project type
- # decision logic
+ # name of real Colab notebook, or one of the non-path strings
+ # explicitly set above, so we can skip project type decision logic
default_project = ConcreteProject(proj_name)
config.project = default_project
diff --git a/paper/main.tex b/paper/main.tex
index 6f69c84e..61896d69 100644
--- a/paper/main.tex
+++ b/paper/main.tex
@@ -389,7 +389,7 @@ \subsubsection{Projects}\label{subsec:projects}
Standard approaches to installing packages from within a notebook can alter the local Python environment in potentially unexpected and undesired ways. For example, running a notebook that installs its dependencies via system shell commands (prefixed with ``\texttt{!}'') or IPython magic commands (prefixed with ``\texttt{\%}'') may cause other existing packages in the user's environment to be uninstalled and replaced with alternate versions. This can lead to incompatibilities between installed packages, affect the behavior of the user's other scripts or notebooks, or even interfere with system applications.
-To prevent Davos-enhanced notebooks from having unwanted side-effects on the user's environment, Davos automatically isolates packages installed via \texttt{smuggle} statements using a custom scheme called ``projects.'' Functionally, a Davos project is similar to a standard Python virtual environment (e.g., created with the standard library's \texttt{venv} module or a third-party tool like \texttt{virtualenv}~\cite{BickEtal07}): it consists of a directory (within a hidden \texttt{.davos} folder in the user's home directory) that houses third-party packages needed for a particular project or task. However, Davos projects do not need to be manually activated and deactivated, do not contain separate Python or \texttt{pip} executables, and \textit{extend} the user's main Python environment rather than replace it.
+To prevent Davos-enhanced notebooks from having unwanted side-effects on the user's environment, Davos automatically isolates packages installed via \texttt{smuggle} statements using a custom scheme called ``projects.'' Functionally, a Davos project is similar to a standard Python virtual environment (e.g., created with the standard library's \texttt{venv} module or a third-party tool like \texttt{virtualenv}~\cite{BickEtal07}): it consists of a directory (within a hidden \texttt{.davos} folder in the user's home directory) that houses third-party packages needed for a particular project or task. However, unlike standard virtual environments, Davos projects do not need to be manually activated and deactivated, do not contain separate Python or \texttt{pip} executables, and \textit{extend} the user's main Python environment rather than replace it.
When Davos is imported into a notebook, a notebook-specific project directory is automatically created (if it does not exist already).
%When Davos is imported into a notebook, a notebook-specific project directory is automatically created (if it does not exist already), named for the absolute path to the notebook file.
@@ -537,7 +537,7 @@ \subsubsection{Configuring and querying Davos}\label{subsec:config}
program throws an error, both its stdout and stderr streams will be
displayed alongside the Python traceback to allow for debugging.
-\item \texttt{.project}: This attribute is a string that specifies the name of
+\item \texttt{.project}: \textcolor{red}{\textbf{TODO: fix this}} This attribute is a string that specifies the name of
the ``project'' associated with the current notebook. As described in
Section~\ref{subsec:projects}, a notebook's project determines where and how
any \texttt{smuggle}d dependencies are installed if they are not available in
diff --git a/setup.cfg b/setup.cfg
index 07da349b..53298318 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = davos
-version = 0.2.2
+version = 0.2.3
description = Install and manage Python packages at runtime using the "smuggle" statement.
long_description = file: README.md
long_description_content_type = text/markdown
| `get_notebook_path` doesn't work when running with VSCode
I was attempting to run some Jupyter notebooks locally (testing Chatify for Neuromatch), and found that `davos` can't be imported if the notebooks are run in VSCode. The `get_notebook_path` function does not work with VSCode as the notebooks do not appear when running the command `jupyter notebook list`.
The error is: `RuntimeError: Could not find notebook path for current kernel`.
I appreciate this is probably not a pressing concern, but might be good to note, particularly to Neuromatch students.
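A condensed, hedged sketch of the fallback the patch above adds. The environment variable `VSCODE_PID` and the kernel global `__vsc_ipynb_file__` come from the diff; the helper name and the `user_ns` argument are illustrative.

```python
import os
from typing import Optional


def notebook_path_from_vscode(user_ns: dict) -> Optional[str]:
    """Return the notebook's absolute path when running under VS Code, else None."""
    # VS Code defines VSCODE_PID in the kernel's environment and injects
    # __vsc_ipynb_file__ (the notebook's absolute path) into the IPython user namespace.
    if os.getenv("VSCODE_PID") is not None:
        return user_ns.get("__vsc_ipynb_file__")
    return None
```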
| thanks @le-big-mac --- we're on it! we were able to replicate this issue in VS Code and we're working on a plan for handling it. | 2023-09-06T04:34:44 | 0.0 | [] | [] |
||
adafruit/circup | adafruit__circup-205 | 38dd524b453aaab1a6f12507ea77493440838606 | diff --git a/circup/backends.py b/circup/backends.py
index 754cbac..4e2d1b7 100644
--- a/circup/backends.py
+++ b/circup/backends.py
@@ -589,14 +589,31 @@ def get_file_path(self, filename):
def is_device_present(self):
"""
- returns True if the device is currently connected
+ returns True if the device is currently connected and running supported version
"""
try:
- _ = self.session.get(f"{self.device_location}/cp/version.json")
- return True
+ with self.session.get(f"{self.device_location}/cp/version.json") as r:
+ r.raise_for_status()
+ web_api_version = r.json().get("web_api_version")
+ if web_api_version is None:
+ self.logger.error("Unable to get web API version from device.")
+ click.secho("Unable to get web API version from device.", fg="red")
+ return False
+
+ if web_api_version < 4:
+ self.logger.error(
+ f"Device running unsupported web API version {web_api_version} < 4."
+ )
+ click.secho(
+ f"Device running unsupported web API version {web_api_version} < 4.",
+ fg="red",
+ )
+ return False
except requests.exceptions.ConnectionError:
return False
+ return True
+
def get_device_versions(self):
"""
Returns a dictionary of metadata from modules on the connected device.
| web workflow does not work with CP 8.2.x due to API differences
Trying web workflow against ESP32 Feather V2 running CP 8.2.10, freshly installed:
```
$ ./venv/bin/circup --verbose --host 172.40.0.11 --password XXX list
Logging to /Users/foo/Library/Logs/circup/circup.log
03/06/2024 22:03:23 INFO: ### Started Circup ###
03/06/2024 22:03:23 INFO: Checking for a newer version of circup
03/06/2024 22:03:23 INFO: Requesting redirect information: https://github.com/adafruit/circuitpython/releases/latest
03/06/2024 22:03:23 INFO: Tag: '8.2.10'
Found device at http://:[email protected], running CircuitPython 8.2.10.
03/06/2024 22:03:24 INFO: List
03/06/2024 22:03:24 INFO: Using bundles: adafruit/Adafruit_CircuitPython_Bundle, adafruit/CircuitPython_Community_Bundle, circuitpython/CircuitPython_Org_Bundle
03/06/2024 22:03:24 ERROR: list indices must be integers or slices, not str
Traceback (most recent call last):
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/__init__.py", line 598, in find_modules
device_modules = backend.get_device_versions()
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 608, in get_device_versions
return self.get_modules(urljoin(self.device_location, self.LIB_DIR_PATH))
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 64, in get_modules
return self._get_modules(device_url)
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 367, in _get_modules
return self._get_modules_http(device_lib_path)
File "/Users/vladimirkotal/Pi/circup/venv/lib/python3.9/site-packages/circup/backends.py", line 388, in _get_modules_http
for entry in r.json()["files"]:
TypeError: list indices must be integers or slices, not str
There was a problem: list indices must be integers or slices, not str
```
Checking the traffic dump the JSON returned for the request is merely an array.
It works with CP 9.0.0 beta2.
| I wonder if the web workflow API is documented somewhere.
https://docs.circuitpython.org/en/latest/docs/workflows.html#web
Thanks. I almost forgot that I used that for the initial set of changes of the web workflow support.
On Thu, Mar 7, 2024, at 14:36, anecdata wrote:
>
>
> https://docs.circuitpython.org/en/latest/docs/workflows.html#web
>
>
> â
> Reply to this email directly, view it on GitHub <https://github.com/adafruit/circup/issues/204#issuecomment-1983523831>, or unsubscribe <https://github.com/notifications/unsubscribe-auth/AAWMMDBNWQUIJFPDJ3CKFPTYXBUPFAVCNFSM6AAAAABEJZ5ADOVHI2DSMVQWIX3LMV43OSLTON2WKQ3PNVWWK3TUHMYTSOBTGUZDGOBTGE>.
> You are receiving this because you authored the thread.Message ID: ***@***.***>
>
The difference is that CP 8.2.10 has `web_api_version` = 2 and returns `[]` on an empty directory listing, while CP 9.0.0 has `web_api_version` = 4 and returns a dictionary/object whose `files` value is an empty array.
I think the answer to this is to check the API version and refuse anything strictly lower than 4. | 2024-03-07T20:41:57 | 0.0 | [] | [] |
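A hedged, condensed sketch of that version gate, essentially what the merged patch above does; the `session` and `device_location` parameters mirror the backend's attributes, and error handling is simplified.

```python
import requests


def is_supported_device(session: requests.Session, device_location: str) -> bool:
    """Return True only if the device answers and reports web_api_version >= 4."""
    try:
        with session.get(f"{device_location}/cp/version.json") as r:
            r.raise_for_status()
            web_api_version = r.json().get("web_api_version")
    except requests.exceptions.RequestException:
        return False
    return web_api_version is not None and web_api_version >= 4
```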
||
ami-iit/adam | ami-iit__adam-63 | 4f36ed48c6c244b19318177906e0cb6ac9634332 | diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
index 8ba32be..41be665 100644
--- a/.github/workflows/black.yml
+++ b/.github/workflows/black.yml
@@ -1,6 +1,7 @@
name: Black action
on:
+ pull_request:
push:
branches:
- main
@@ -9,11 +10,11 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: black
- uses: lgeiger/[email protected]
+ uses: psf/black@stable
with:
- args: .
+ options: "--check --verbose"
- name: Check for modified files
id: git-check
run: echo ::set-output name=modified::$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)
diff --git a/src/adam/model/std_factories/std_model.py b/src/adam/model/std_factories/std_model.py
index 41d8af6..ffeff71 100644
--- a/src/adam/model/std_factories/std_model.py
+++ b/src/adam/model/std_factories/std_model.py
@@ -7,11 +7,12 @@
from adam.core.spatial_math import SpatialMath
from adam.model import ModelFactory, StdJoint, StdLink
+
def urdf_remove_sensors_tags(xml_string):
# Parse the XML string
root = ET.fromstring(xml_string)
- # Find and remove all tags named "sensor" that are child of
+ # Find and remove all tags named "sensor" that are child of
# root node (i.e. robot)
for sensors_tag in root.findall("sensor"):
root.remove(sensors_tag)
@@ -21,6 +22,7 @@ def urdf_remove_sensors_tags(xml_string):
return modified_xml_string
+
class URDFModelFactory(ModelFactory):
"""This factory generates robot elements from urdf_parser_py
@@ -36,17 +38,19 @@ def __init__(self, path: str, math: SpatialMath):
raise FileExistsError(path)
# Read URDF, but before passing it to urdf_parser_py get rid of all sensor tags
- # sensor tags are valid elements of URDF (see ),
+ # sensor tags are valid elements of URDF (see ),
# but they are ignored by urdf_parser_py, that complains every time it sees one.
# As there is nothing to be fixed in the used models, and it is not useful
# to have a useless and noisy warning, let's remove before hands all the sensor elements,
# that anyhow are not parser by urdf_parser_py or adam
# See https://github.com/ami-iit/ADAM/issues/59
- xml_file = open(path, 'r')
+ xml_file = open(path, "r")
xml_string = xml_file.read()
xml_file.close()
xml_string_without_sensors_tags = urdf_remove_sensors_tags(xml_string)
- self.urdf_desc = urdf_parser_py.urdf.URDF.from_xml_string(xml_string_without_sensors_tags)
+ self.urdf_desc = urdf_parser_py.urdf.URDF.from_xml_string(
+ xml_string_without_sensors_tags
+ )
self.name = self.urdf_desc.name
def get_joints(self) -> List[StdJoint]:
| Black action is failing
The github action that verifies that the code is formatted following [black](https://github.com/psf/black) standards is failing.
See https://github.com/ami-iit/adam/actions/runs/7221531028
| Have you considered using the [`psf/black@stable`](https://github.com/psf/black/blob/main/action/main.py) action from the original `black` repo? | 2024-01-12T12:04:32 | 0.0 | [] | [] |
||
ymcui/Chinese-LLaMA-Alpaca | ymcui__Chinese-LLaMA-Alpaca-555 | 69045db949000b1c635b4b36b26e048f9d57580f | diff --git a/scripts/training/run_clm_pt_with_peft.py b/scripts/training/run_clm_pt_with_peft.py
index e8ad9f7..67ef2c6 100644
--- a/scripts/training/run_clm_pt_with_peft.py
+++ b/scripts/training/run_clm_pt_with_peft.py
@@ -57,6 +57,29 @@
from sklearn.metrics import accuracy_score
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
+from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
+
+
+class SavePeftModelCallback(transformers.TrainerCallback):
+ def save_model(self, args, state, kwargs):
+ if state.best_model_checkpoint is not None:
+ checkpoint_folder = os.path.join(state.best_model_checkpoint, "pt_lora_model")
+ else:
+ checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
+
+ peft_model_path = os.path.join(checkpoint_folder, "pt_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
+ def on_save(self, args, state, control, **kwargs):
+ self.save_model(args, state, kwargs)
+ return control
+
+ def on_train_end(self, args, state, control, **kwargs):
+ peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
def accuracy(predictions, references, normalize=True, sample_weight=None):
return {
@@ -64,6 +87,8 @@ def accuracy(predictions, references, normalize=True, sample_weight=None):
accuracy_score(references, predictions, normalize=normalize, sample_weight=sample_weight)
)
}
+
+
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
@@ -72,6 +97,7 @@ def compute_metrics(eval_preds):
preds = preds[:, :-1].reshape(-1)
return accuracy(predictions=preds, references=labels)
+
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
@@ -126,24 +152,6 @@ def fault_tolerance_data_collator(features: List) -> Dict[str, Any]:
return batch
-class GroupTextsBuilder:
- def __init__(self,max_seq_length):
- self.max_seq_length = max_seq_length
- def __call__(self, examples):
- # Concatenate all texts.
- firsts = {k:examples[k][0][0] for k in examples.keys()}
- lasts = {k:examples[k][0][-1] for k in examples.keys()}
- contents = {k:sum([vi[1:-1] for vi in v],[]) for k,v in examples.items()}
- total_length = len(contents[list(examples.keys())[0]])
-
- content_length = self.max_seq_length - 2
- if total_length >= content_length:
- total_length = (total_length // content_length ) * content_length
- # Split by chunks of max_len.
- result = {
- k: [ [firsts[k]] + t[i : i + content_length] + [lasts[k]] for i in range(0, total_length, content_length)] for k, t in contents.items()}
- return result
-
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@@ -297,6 +305,7 @@ def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
+
@dataclass
class MyTrainingArguments(TrainingArguments):
trainable : Optional[str] = field(default="q_proj,v_proj")
@@ -307,8 +316,10 @@ class MyTrainingArguments(TrainingArguments):
debug_mode : Optional[bool] = field(default=False)
peft_path : Optional[str] = field(default=None)
+
logger = logging.getLogger(__name__)
+
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
@@ -576,7 +587,7 @@ def group_texts(examples):
if training_args.do_eval and not is_torch_tpu_available()
else None,
)
-
+ trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
checkpoint = None
@@ -585,7 +596,6 @@ def group_texts(examples):
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
- trainer.save_model()
metrics = train_result.metrics
@@ -598,19 +608,6 @@ def group_texts(examples):
trainer.save_metrics("train", metrics)
trainer.save_state()
- import shutil
- from transformers.modeling_utils import unwrap_model
- lora_path=os.path.join(training_args.output_dir,'pt_lora_model')
- os.makedirs(lora_path, exist_ok=True)
- try:
- unwrap_model(model).peft_config.save_pretrained(lora_path)
- except AttributeError:
- unwrap_model(model).peft_config['default'].save_pretrained(lora_path)
- shutil.copyfile(
- os.path.join(training_args.output_dir,'pytorch_model.bin'),
- os.path.join(lora_path,'adapter_model.bin'))
- tokenizer.save_pretrained(lora_path)
-
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
@@ -629,7 +626,5 @@ def group_texts(examples):
trainer.save_metrics("eval", metrics)
-
-
if __name__ == "__main__":
main()
diff --git a/scripts/training/run_clm_sft_with_peft.py b/scripts/training/run_clm_sft_with_peft.py
index 276de40..21f108d 100644
--- a/scripts/training/run_clm_sft_with_peft.py
+++ b/scripts/training/run_clm_sft_with_peft.py
@@ -53,8 +53,7 @@
from transformers.utils.versions import require_version
from peft import LoraConfig, TaskType, get_peft_model, PeftModel, get_peft_model_state_dict
-
-
+from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
IGNORE_INDEX = -100
@@ -69,6 +68,27 @@
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
+class SavePeftModelCallback(transformers.TrainerCallback):
+ def save_model(self, args, state, kwargs):
+ if state.best_model_checkpoint is not None:
+ checkpoint_folder = os.path.join(state.best_model_checkpoint, "sft_lora_model")
+ else:
+ checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
+
+ peft_model_path = os.path.join(checkpoint_folder, "sft_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
+ def on_save(self, args, state, control, **kwargs):
+ self.save_model(args, state, kwargs)
+ return control
+
+ def on_train_end(self, args, state, control, **kwargs):
+ peft_model_path = os.path.join(args.output_dir, "sft_lora_model")
+ kwargs["model"].save_pretrained(peft_model_path)
+ kwargs["tokenizer"].save_pretrained(peft_model_path)
+
+
@dataclass
class ModelArguments:
"""
@@ -182,6 +202,7 @@ class DataTrainingArguments:
max_seq_length: Optional[int] = field(default=512)
+
@dataclass
class MyTrainingArguments(TrainingArguments):
trainable : Optional[str] = field(default="q_proj,v_proj")
@@ -192,8 +213,10 @@ class MyTrainingArguments(TrainingArguments):
peft_path : Optional[str] = field(default=None)
force_resize_embeddings: bool = field(default=False)
+
logger = logging.getLogger(__name__)
+
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, MyTrainingArguments))
@@ -248,7 +271,6 @@ def main():
# Set seed before initializing model.
set_seed(training_args.seed)
-
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
@@ -292,7 +314,6 @@ def main():
eval_dataset=None
train_dataset = None
-
if training_args.do_train:
with training_args.main_process_first(desc="loading and tokenization"):
path = Path(data_args.dataset_dir)
@@ -321,7 +342,6 @@ def main():
logger.info("eval example:")
logger.info(tokenizer.decode(eval_dataset[0]['input_ids']))
-
if model_args.model_name_or_path:
torch_dtype = (
model_args.torch_dtype
@@ -338,7 +358,6 @@ def main():
torch_dtype=torch_dtype,
low_cpu_mem_usage=True
)
-
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
@@ -381,7 +400,6 @@ def main():
lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())
).__get__(model, type(model))
-
# Initialize our Trainer
trainer = Trainer(
model=model,
@@ -391,6 +409,7 @@ def main():
tokenizer=tokenizer,
data_collator=data_collator,
)
+ trainer.add_callback(SavePeftModelCallback)
# Training
if training_args.do_train:
@@ -400,7 +419,6 @@ def main():
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
- trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
@@ -410,19 +428,6 @@ def main():
trainer.save_metrics("train", metrics)
trainer.save_state()
- import shutil
- from transformers.modeling_utils import unwrap_model
- lora_path=os.path.join(training_args.output_dir,'sft_lora_model')
- os.makedirs(lora_path, exist_ok=True)
- try:
- unwrap_model(model).peft_config.save_pretrained(lora_path)
- except AttributeError:
- unwrap_model(model).peft_config['default'].save_pretrained(lora_path)
- shutil.copyfile(
- os.path.join(training_args.output_dir,'pytorch_model.bin'),
- os.path.join(lora_path,'adapter_model.bin'))
- tokenizer.save_pretrained(lora_path)
-
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
@@ -439,8 +444,6 @@ def main():
trainer.save_metrics("eval", metrics)
-
-
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
@@ -464,5 +467,6 @@ def smart_tokenizer_and_embedding_resize(
output_embeddings[-num_new_tokens:] = output_embeddings_avg
return num_new_tokens
+
if __name__ == "__main__":
main()
| Error when saving files after training: it says output/pytorch_model.bin does not exist
### Detailed description of the problem
After training, an error is reported when saving files, saying that output/pytorch_model.bin does not exist, but the file actually does exist.
<img width="762" alt="image" src="https://github.com/ymcui/Chinese-LLaMA-Alpaca/assets/6229526/517ca191-bb60-4e10-bd20-66c3594fea4f">
Training command
```
wandb disabled
lr=2e-4
lora_rank=64
lora_alpha=128
lora_trainable="q_proj,v_proj,k_proj,o_proj,gate_proj,down_proj,up_proj"
modules_to_save="embed_tokens,lm_head"
lora_dropout=0.05
pretrained_model=/data/llama/7B
#pretrained_model=/data/output/chinese-llama-alpaca-7b-lora
chinese_tokenizer_path=/data/chinese-llama-plus-lora-7b
#chinese_tokenizer_path=/data/output/chinese-llama-alpaca-7b-lora
dataset_dir=/data/pt_data/
data_cache=/data/temp_data_cache_dir/
per_device_train_batch_size=1
per_device_eval_batch_size=1
training_steps=100
gradient_accumulation_steps=1
output_dir=/data/output/chinese-llama-lora-7b-0609-v1.1
deepspeed_config_file=ds_zero2_no_offload.json
torchrun --nnodes 1 --nproc_per_node 2 run_clm_pt_with_peft.py \
--deepspeed ${deepspeed_config_file} \
--model_name_or_path ${pretrained_model} \
--tokenizer_name_or_path ${chinese_tokenizer_path} \
--dataset_dir ${dataset_dir} \
--data_cache_dir ${data_cache} \
--validation_split_percentage 0.001 \
--per_device_train_batch_size ${per_device_train_batch_size} \
--per_device_eval_batch_size ${per_device_eval_batch_size} \
--do_train \
--seed $RANDOM \
--fp16 \
--max_steps ${training_steps} \
--lr_scheduler_type cosine \
--learning_rate ${lr} \
--warmup_ratio 0.05 \
--weight_decay 0.01 \
--logging_strategy steps \
--logging_steps 10 \
--save_strategy steps \
--save_total_limit 100 \
--save_steps 1200 \
--gradient_accumulation_steps ${gradient_accumulation_steps} \
--preprocessing_num_workers 8 \
--block_size 512 \
--output_dir ${output_dir} \
--overwrite_output_dir \
--ddp_timeout 30000 \
--logging_first_step True \
--lora_rank ${lora_rank} \
--lora_alpha ${lora_alpha} \
--trainable ${lora_trainable} \
--modules_to_save ${modules_to_save} \
--lora_dropout ${lora_dropout} \
--torch_dtype float16 \
--gradient_checkpointing \
--ddp_find_unused_parameters False
```
### Reference information
CPU: 64 cores
Memory: 256 GB
GPU: two A30 cards
#### Dependencies (required for code-related issues)
```
transformers 4.28.1
peft 0.3.0
torch 2.0.1
deepspeed 0.9.2
```
#### Run logs or screenshots
<img width="1257" alt="image" src="https://github.com/ymcui/Chinese-LLaMA-Alpaca/assets/6229526/2502bd86-76d8-4181-b9b9-933267e35969">
### Checklist
*Put an x inside [ ] to tick an item. Delete this line before submitting, and keep only the options that match your issue type.*
- [x] **Base model**: LLaMA-Plus 7B
- [x] **Operating system**: Linux
- [x] **Issue type**: Model training and fine-tuning
- [x] **Model correctness check**: Be sure to verify the model against [SHA256.md](https://github.com/ymcui/Chinese-LLaMA-Alpaca/blob/main/SHA256.md); with an incorrect model, correct results and normal operation cannot be guaranteed.
- [x] (Required) Since the related dependencies are updated frequently, make sure you have followed the relevant steps in the [Wiki](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki)
- [x] (Required) I have read the [FAQ section](https://github.com/ymcui/Chinese-LLaMA-Alpaca/wiki/常见问题) and searched the existing issues; no similar problem or solution was found
- [ ] (Required) Third-party tool issues: e.g. [llama.cpp](https://github.com/ggerganov/llama.cpp), [text-generation-webui](https://github.com/oobabooga/text-generation-webui), [LlamaChat](https://github.com/alexrozanski/LlamaChat), etc.; it is also recommended to look for solutions in the corresponding projects
| Which version of deepspeed are you using?
> deepspeed 0.9.2

I ran into the same problem. I was using the zero3 strategy: the saved pytorch_model.bin is 13 GB and saving is slow, so by the time the script copies the file, pytorch_model.bin has not been generated yet.
I commented out the file-copy line and copied pytorch_model.bin manually.
This happened to me as well; it looks like a bug in the latest version. After modifying the script run_clm_sft_with_peft.py, it runs fine.

@ymcui @airaria it would be good if the maintainers could fix this
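The merged patch at the top of this entry sidesteps the fragile copy step by saving the LoRA adapter and tokenizer from a `TrainerCallback` instead of copying `pytorch_model.bin`. A condensed sketch of that callback (directory names from the patch; details such as best-checkpoint handling trimmed):

```python
import os

import transformers
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR


class SavePeftModelCallback(transformers.TrainerCallback):
    """Save the PEFT adapter and tokenizer directly at each checkpoint."""

    def on_save(self, args, state, control, **kwargs):
        checkpoint_dir = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
        peft_model_path = os.path.join(checkpoint_dir, "pt_lora_model")
        kwargs["model"].save_pretrained(peft_model_path)
        kwargs["tokenizer"].save_pretrained(peft_model_path)
        return control

    def on_train_end(self, args, state, control, **kwargs):
        peft_model_path = os.path.join(args.output_dir, "pt_lora_model")
        kwargs["model"].save_pretrained(peft_model_path)
        kwargs["tokenizer"].save_pretrained(peft_model_path)
```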
||
griffithlab/VAtools | griffithlab__VAtools-50 | 9cc919797e5c8a58b264bdb4257bac853e31c882 | diff --git a/vatools/ref_transcript_mismatch_reporter.py b/vatools/ref_transcript_mismatch_reporter.py
index 5fcfe10..311e5a5 100644
--- a/vatools/ref_transcript_mismatch_reporter.py
+++ b/vatools/ref_transcript_mismatch_reporter.py
@@ -16,6 +16,8 @@ def resolve_consequence(consequence_string):
if 'start_lost' in consequences:
consequence = None
+ elif 'stop_retained_variant' in consequences:
+ consequence = None
elif 'frameshift_variant' in consequences:
consequence = 'FS'
elif 'missense_variant' in consequences:
@@ -137,10 +139,18 @@ def main(args_input = sys.argv[1:]):
wildtype_amino_acid = wildtype_amino_acid.split('X')[0]
if key == 'Protein_position':
protein_position = value
+ if '/' in value:
+ protein_position = value.split('/')[0]
+ if protein_position == '-':
+ protein_position = value.split('/')[1]
if key == 'Consequence':
variant_type = resolve_consequence(value)
if key == 'Feature':
transcript = value
+
+ if '*' in full_wildtype_sequence:
+ continue
+
if variant_type == 'missense' or variant_type == 'inframe_ins':
if '-' in protein_position:
position = int(protein_position.split('-', 1)[0]) - 1
@@ -153,6 +163,9 @@ def main(args_input = sys.argv[1:]):
else:
continue
+ if position == '-':
+ continue
+
if wildtype_amino_acid != '-':
processable_transcript_count += 1
processable_variant = True
| ref_transcript_mismatch_reporter: `ValueError: invalid literal for int() with base 10: '71/98'`
I get the following error when running vatools
```
Traceback (most recent call last):
File "/home/el/miniconda3/envs/vatools/bin/ref-transcript-mismatch-reporter", line 8, in <module>
sys.exit(main())
File "/home/el/miniconda3/envs/vatools/lib/python3.8/site-packages/vatools/ref_transcript_mismatch_reporter.py", line 148, in main
position = int(protein_position) - 1
ValueError: invalid literal for int() with base 10: '71/98'
```
I ran `pip show vatools` to make sure it works and got the following output:
```
Name: vatools
Version: 5.0.0
Summary: A tool for annotating VCF files with expression and readcount data
Home-page: https://github.com/griffithlab/vatools
Author: Susanna Kiwala, Chris Miller
Author-email: [email protected]
License: MIT License
Location: /home/el/miniconda3/envs/vatools/lib/python3.8/site-packages
Requires: vcfpy, pysam, gtfparse, testfixtures, pandas
Required-by:
```
Could these issues be related?
regards
El
_Originally posted by @iichelhadi in https://github.com/griffithlab/pVACtools/issues/692#issuecomment-902470748_
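The underlying cause is that the CSQ `Protein_position` value can arrive as a `position/length` pair such as `71/98` (or `-/98` when the position itself is absent), which `int()` cannot parse. A hedged sketch of the parsing that the patch above introduces, pulled out into a standalone helper (the helper name is made up):

```python
def parse_protein_position(value: str) -> str:
    """Strip the '/length' part of a Protein_position value, e.g. '71/98' -> '71'."""
    protein_position = value
    if "/" in value:
        protein_position = value.split("/")[0]
        # '-/98' means the position before the slash is absent; the patch then
        # falls back to the value after the slash.
        if protein_position == "-":
            protein_position = value.split("/")[1]
    return protein_position


assert parse_protein_position("71/98") == "71"  # the value from the traceback above
```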
| 2021-08-25T19:33:58 | 0.0 | [] | [] |