text_prompt | code_prompt |
---|---|
<SYSTEM_TASK:>
Find the first starred element in the specified row. Returns
<END_TASK>
<USER_TASK:>
Description:
def __find_star_in_row(self, row):
"""
Find the first starred element in the specified row. Returns
the column index, or -1 if no starred element was found.
""" |
col = -1
for j in range(self.n):
if self.marked[row][j] == 1:
col = j
break
return col |
<SYSTEM_TASK:>
Find the first starred element in the specified column. Returns
<END_TASK>
<USER_TASK:>
Description:
def __find_star_in_col(self, col):
"""
Find the first starred element in the specified column. Returns
the row index, or -1 if no starred element was found.
""" |
row = -1
for i in range(self.n):
if self.marked[i][col] == 1:
row = i
break
return row |
<SYSTEM_TASK:>
Find the first prime element in the specified row. Returns
<END_TASK>
<USER_TASK:>
Description:
def __find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
the column index, or -1 if no prime element was found.
""" |
col = -1
for j in range(self.n):
if self.marked[row][j] == 2:
col = j
break
return col |
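For context, here is a minimal standalone sketch of the marking convention these three helpers assume (1 marks a starred zero, 2 marks a primed zero in a Munkres/Hungarian-style matrix). The `find_first` helper is hypothetical and only illustrates the scan pattern.

# Hedged sketch: a standalone version of the row/column scans above, assuming
# marked[i][j] uses 1 for starred zeros and 2 for primed zeros (Munkres convention).
def find_first(marked, n, value, row=None, col=None):
    """Return the first index holding `value` in the given row or column, or -1."""
    if row is not None:
        return next((j for j in range(n) if marked[row][j] == value), -1)
    return next((i for i in range(n) if marked[i][col] == value), -1)

marked = [[0, 1, 0],
          [2, 0, 0],
          [0, 0, 1]]
print(find_first(marked, 3, 1, row=0))  # 1: star in row 0
print(find_first(marked, 3, 1, col=2))  # 2: star in column 2
print(find_first(marked, 3, 2, row=1))  # 0: prime in row 1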
<SYSTEM_TASK:>
Update contingency table with new values without creating a new object.
<END_TASK>
<USER_TASK:>
Description:
def update(self, a, b, c, d):
"""
Update contingency table with new values without creating a new object.
""" |
self.table.ravel()[:] = [a, b, c, d]
self.N = self.table.sum() |
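A minimal, self-contained sketch of the in-place update pattern used above; the `TinyContingencyTable` class and its (a, b, c, d) cell ordering are illustrative assumptions, not the library's actual class.

import numpy as np

class TinyContingencyTable:
    """Hypothetical 2x2 table mirroring the in-place update shown above."""
    def __init__(self, a, b, c, d):
        self.table = np.array([[a, b], [c, d]], dtype=float)
        self.N = self.table.sum()

    def update(self, a, b, c, d):
        # Overwrite the existing array instead of allocating a new object.
        self.table.ravel()[:] = [a, b, c, d]
        self.N = self.table.sum()

ct = TinyContingencyTable(10, 2, 3, 85)
ct.update(12, 1, 4, 83)
print(ct.table, ct.N)  # updated cells, N == 100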
<SYSTEM_TASK:>
Write each decision tree in an ensemble to a file.
<END_TASK>
<USER_TASK:>
Description:
def output_tree_ensemble(tree_ensemble_obj, output_filename, attribute_names=None):
"""
Write each decision tree in an ensemble to a file.
Parameters
----------
tree_ensemble_obj : sklearn.ensemble object
Random Forest or Gradient Boosted Regression object
output_filename : str
File where trees are written
attribute_names : list
List of attribute names to be used in place of indices if available.
""" |
for t, tree in enumerate(tree_ensemble_obj.estimators_):
print("Writing Tree {0:d}".format(t))
out_file = open(output_filename + ".{0:d}.tree".format(t), "w")
#out_file.write("Tree {0:d}\n".format(t))
tree_str = print_tree_recursive(tree.tree_, 0, attribute_names)
out_file.write(tree_str)
#out_file.write("\n")
out_file.close()
return |
<SYSTEM_TASK:>
Recursively writes a string representation of a decision tree object.
<END_TASK>
<USER_TASK:>
Description:
def print_tree_recursive(tree_obj, node_index, attribute_names=None):
"""
Recursively writes a string representation of a decision tree object.
Parameters
----------
tree_obj : sklearn.tree._tree.Tree object
A base decision tree object
node_index : int
Index of the node being printed
attribute_names : list
List of attribute names
Returns
-------
tree_str : str
String representation of decision tree in the same format as the parf library.
""" |
tree_str = ""
if node_index == 0:
tree_str += "{0:d}\n".format(tree_obj.node_count)
if tree_obj.feature[node_index] >= 0:
if attribute_names is None:
attr_val = "{0:d}".format(tree_obj.feature[node_index])
else:
attr_val = attribute_names[tree_obj.feature[node_index]]
tree_str += "b {0:d} {1} {2:0.4f} {3:d} {4:1.5e}\n".format(node_index,
attr_val,
tree_obj.weighted_n_node_samples[node_index],
tree_obj.n_node_samples[node_index],
tree_obj.threshold[node_index])
else:
if tree_obj.max_n_classes > 1:
leaf_value = "{0:d}".format(tree_obj.value[node_index].argmax())
else:
leaf_value = "{0}".format(tree_obj.value[node_index][0][0])
tree_str += "l {0:d} {1} {2:0.4f} {3:d}\n".format(node_index,
leaf_value,
tree_obj.weighted_n_node_samples[node_index],
tree_obj.n_node_samples[node_index])
if tree_obj.children_left[node_index] > 0:
tree_str += print_tree_recursive(tree_obj, tree_obj.children_left[node_index], attribute_names)
if tree_obj.children_right[node_index] > 0:
tree_str += print_tree_recursive(tree_obj, tree_obj.children_right[node_index], attribute_names)
return tree_str |
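A hedged usage sketch for the two functions above, assuming scikit-learn is installed; the synthetic data, output prefix, and attribute names are made up for illustration.

import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Fit a small forest on synthetic data and dump each tree to its own file.
rng = np.random.RandomState(0)
X = rng.normal(size=(200, 4))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)
forest = RandomForestRegressor(n_estimators=3, max_depth=3, random_state=0)
forest.fit(X, y)
# Writes forest_out.0.tree, forest_out.1.tree, forest_out.2.tree
output_tree_ensemble(forest, "forest_out", attribute_names=["a", "b", "c", "d"])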
<SYSTEM_TASK:>
Fitness function in the validation set
<END_TASK>
<USER_TASK:>
Description:
def fitness_vs(self, v):
"""Fitness function in the validation set
Uses BER in classification and RSE in regression.""" |
base = self._base
if base._classifier:
if base._multiple_outputs:
v.fitness_vs = v._error
# if base._fitness_function == 'macro-F1':
# v.fitness_vs = v._error
# elif base._fitness_function == 'BER':
# v.fitness_vs = v._error
# elif base._fitness_function == 'macro-Precision':
# v.fitness_vs = v._error
# elif base._fitness_function == 'accDotMacroF1':
# v.fitness_vs = v._error
# elif base._fitness_function == 'macro-RecallF1':
# v.fitness_vs = v._error
# elif base._fitness_function == 'F1':
# v.fitness_vs = v._error
# else:
# v.fitness_vs = - v._error.dot(base._mask_vs) / base._mask_vs.sum()
else:
v.fitness_vs = -((base.y - v.hy.sign()).sign().fabs() *
base._mask_vs).sum()
else:
mask = base._mask
y = base.y
hy = v.hy
if not isinstance(mask, list):
mask = [mask]
y = [y]
hy = [hy]
fit = []
for _mask, _y, _hy in zip(mask, y, hy):
m = (_mask + -1).fabs()
x = _y * m
y = _hy * m
a = (x - y).sq().sum()
b = (x + -x.sum() / x.size()).sq().sum()
fit.append(-a / b)
v.fitness_vs = np.mean(fit) |
<SYSTEM_TASK:>
Set the fitness to a new node.
<END_TASK>
<USER_TASK:>
Description:
def set_fitness(self, v):
"""Set the fitness to a new node.
Returns false in case fitness is not finite""" |
base = self._base
self.fitness(v)
if not np.isfinite(v.fitness):
self.del_error(v)
return False
if base._tr_fraction < 1:
self.fitness_vs(v)
if not np.isfinite(v.fitness_vs):
self.del_error(v)
return False
self.del_error(v)
return True |
<SYSTEM_TASK:>
Segment forecast tracks to only output data contained within a
<END_TASK>
<USER_TASK:>
Description:
def output_sector_csv(self,csv_path,file_dict_key,out_path):
"""
Segment forecast tracks to only output data contained within a
region in the CONUS, as defined by the mapfile.
Args:
csv_path(str): Path to the full CONUS csv file.
file_dict_key(str): Dictionary key for the csv files,
currently either 'track_step' or 'track_total'
out_path (str): Path to output new segmented csv files.
Returns:
None. Segmented forecast tracks are written to a csv file in out_path.
""" |
csv_file = csv_path + "{0}_{1}_{2}_{3}.csv".format(
file_dict_key,
self.ensemble_name,
self.member,
self.run_date.strftime(self.date_format))
if exists(csv_file):
csv_data = pd.read_csv(csv_file)
if self.inds is None:
lon_obj = csv_data.loc[:,"Centroid_Lon"]
lat_obj = csv_data.loc[:,"Centroid_Lat"]
self.inds = np.where((self.ne_lat>=lat_obj)&(self.sw_lat<=lat_obj)\
&(self.ne_lon>=lon_obj)&(self.sw_lon<=lon_obj))[0]
if np.shape(self.inds)[0] > 0:
csv_data = csv_data.reindex(np.array(self.inds))
sector_csv_filename = out_path + "{0}_{1}_{2}_{3}.csv".format(
file_dict_key,
self.ensemble_name,
self.member,
self.run_date.strftime(self.date_format))
print("Output sector csv file " + sector_csv_filename)
csv_data.to_csv(sector_csv_filename,
na_rep="nan",
float_format="%0.5f",
index=False)
os.chmod(sector_csv_filename, 0o666)
else:
print('No {0} {1} sector data found'.format(self.member,
self.run_date.strftime("%Y%m%d")))
else:
print('No {0} {1} csv file found'.format(self.member,
self.run_date.strftime("%Y%m%d")))
return |
<SYSTEM_TASK:>
Return a json-clean dict. Will log info message for failures.
<END_TASK>
<USER_TASK:>
Description:
def clean_dict(d0, clean_item_fn=None):
"""
Return a json-clean dict. Will log info message for failures.
""" |
clean_item_fn = clean_item_fn if clean_item_fn else clean_item
d = dict()
for key in d0:
cleaned_item = clean_item_fn(d0[key])
if cleaned_item is not None:
d[key] = cleaned_item
return d |
<SYSTEM_TASK:>
Return a json-clean list. Will log info message for failures.
<END_TASK>
<USER_TASK:>
Description:
def clean_list(l0, clean_item_fn=None):
"""
Return a json-clean list. Will log info message for failures.
""" |
clean_item_fn = clean_item_fn if clean_item_fn else clean_item
l = list()
for index, item in enumerate(l0):
cleaned_item = clean_item_fn(item)
l.append(cleaned_item)
return l |
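The `clean_item` fallback referenced by both functions is not shown in this excerpt. A hedged sketch of one plausible implementation, which keeps JSON-friendly scalars, recurses into containers, and logs then drops everything else:

import logging

def clean_item(item):
    """Hypothetical sketch only; the real clean_item is defined elsewhere."""
    if isinstance(item, dict):
        return clean_dict(item, clean_item_fn=clean_item)
    if isinstance(item, (list, tuple)):
        return clean_list(list(item), clean_item_fn=clean_item)
    if item is None or isinstance(item, (str, int, float, bool)):
        return item
    logging.info("clean_item: dropping value of non-JSON type %s", type(item))
    return None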
<SYSTEM_TASK:>
Sample the stack in a thread and print it at regular intervals.
<END_TASK>
<USER_TASK:>
Description:
def sample_stack_all(count=10, interval=0.1):
"""Sample the stack in a thread and print it at regular intervals.""" |
def print_stack_all(l, ll):
l1 = list()
l1.append("*** STACKTRACE - START ***")
code = []
for threadId, stack in sys._current_frames().items():
sub_code = []
sub_code.append("# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
sub_code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
sub_code.append(" %s" % (line.strip()))
if not "in select" in sub_code[-2] and \
not "in wait" in sub_code[-2] and \
not "in print_stack_all" in sub_code[-2] and \
not "in sample_stack_all" in sub_code[-2] and \
not "in checkcache" in sub_code[-2] and \
not "do_sleep" in sub_code[-2] and \
not "sleep" in sub_code[-1] and \
not any(["in do_sample" in s for s in sub_code]):
code.extend(sub_code)
for line in code:
l1.append(line)
l1.append("*** STACKTRACE - END ***")
with l:
ll.extend(l1)
def do_sample():
l = threading.RLock()
ll = list()
for i in range(count):
print_stack_all(l, ll)
time.sleep(interval)
with l:
print("\n".join(ll))
threading.Thread(target=do_sample).start() |
<SYSTEM_TASK:>
Calculate a probability based on the number of grid points in an area that exceed a threshold.
<END_TASK>
<USER_TASK:>
Description:
def neighborhood_probability(self, threshold, radius):
"""
Calculate a probability based on the number of grid points in an area that exceed a threshold.
Args:
threshold: Intensity threshold that grid points must meet or exceed.
radius: Radius of the circular neighborhood in number of grid points.
Returns:
Array of neighborhood probabilities with the same shape as the input data.
""" |
weights = disk(radius, dtype=np.uint8)
thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8)
neighbor_prob = np.zeros(self.data.shape, dtype=np.float32)
for t in np.arange(self.data.shape[0]):
thresh_data[self.data[t] >= threshold] = 1
maximized = fftconvolve(thresh_data, weights, mode="same")
maximized[maximized > 1] = 1
maximized[maximized < 1] = 0
neighbor_prob[t] = fftconvolve(maximized, weights, mode="same")
thresh_data[:] = 0
neighbor_prob[neighbor_prob < 1] = 0
neighbor_prob /= weights.sum()
return neighbor_prob |
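A hedged demonstration of the two-stage convolution above on a single synthetic 2D field: threshold the field, spread exceedances over a disk (the "maximized" step), then turn a second disk convolution into a fractional neighborhood coverage. The rounding guards differ slightly from the method above to protect against FFT round-off.

import numpy as np
from scipy.signal import fftconvolve
from skimage.morphology import disk

field = np.zeros((50, 50), dtype=np.float32)
field[20:25, 20:25] = 30.0                    # one synthetic "storm"
threshold, radius = 25.0, 5
weights = disk(radius, dtype=np.uint8)

exceed = (field >= threshold).astype(np.uint8)
maximized = (fftconvolve(exceed, weights, mode="same") > 0.5).astype(np.float32)
coverage = fftconvolve(maximized, weights, mode="same")
prob = np.clip(coverage, 0, None) / weights.sum()  # fraction of neighborhood covered
print(round(float(prob.max()), 3))                 # ~1.0 at the storm center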
<SYSTEM_TASK:>
Loads data from each ensemble member.
<END_TASK>
<USER_TASK:>
Description:
def load_data(self):
"""
Loads data from each ensemble member.
""" |
for m, member in enumerate(self.members):
mo = ModelOutput(self.ensemble_name, member, self.run_date, self.variable,
self.start_date, self.end_date, self.path, self.map_file, self.single_step)
mo.load_data()
if self.data is None:
self.data = np.zeros((len(self.members), mo.data.shape[0], mo.data.shape[1], mo.data.shape[2]),
dtype=np.float32)
if mo.units == "m":
self.data[m] = mo.data * 1000
self.units = "mm"
else:
self.data[m] = mo.data
if self.units == "":
self.units = mo.units
del mo.data
del mo |
<SYSTEM_TASK:>
Calculate grid-point statistics across ensemble members.
<END_TASK>
<USER_TASK:>
Description:
def point_consensus(self, consensus_type):
"""
Calculate grid-point statistics across ensemble members.
Args:
consensus_type: mean, std, median, max, or percentile_nn
Returns:
EnsembleConsensus containing point statistic
""" |
if "mean" in consensus_type:
consensus_data = np.mean(self.data, axis=0)
elif "std" in consensus_type:
consensus_data = np.std(self.data, axis=0)
elif "median" in consensus_type:
consensus_data = np.median(self.data, axis=0)
elif "max" in consensus_type:
consensus_data = np.max(self.data, axis=0)
elif "percentile" in consensus_type:
percentile = int(consensus_type.split("_")[1])
consensus_data = np.percentile(self.data, percentile, axis=0)
else:
consensus_data = np.zeros(self.data.shape[1:])
consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name,
self.run_date, self.variable, self.start_date, self.end_date, self.units)
return consensus |
<SYSTEM_TASK:>
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at
<END_TASK>
<USER_TASK:>
Description:
def point_probability(self, threshold):
"""
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at
that point.
Args:
threshold: Members with values >= threshold are assigned 1, otherwise 0.
Returns:
EnsembleConsensus
""" |
point_prob = np.zeros(self.data.shape[1:])
for t in range(self.data.shape[1]):
point_prob[t] = np.where(self.data[:, t] >= threshold, 1.0, 0.0).mean(axis=0)
return EnsembleConsensus(point_prob, "point_probability", self.ensemble_name,
self.run_date, self.variable + "_{0:0.2f}_{1}".format(threshold,
self.units.replace(" ", "_")),
self.start_date, self.end_date, "") |
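A hedged numpy illustration of the exceedance calculation above: the ensemble axis is axis 0, so averaging the 0/1 exceedance field over members yields the probability at every grid point and time (the per-hour loop in the method is equivalent to doing this in one shot). The synthetic ensemble below is made up.

import numpy as np

ens = np.random.RandomState(1).gamma(shape=2.0, scale=5.0, size=(5, 3, 10, 10))
threshold = 20.0
point_prob = np.where(ens >= threshold, 1.0, 0.0).mean(axis=0)  # shape (time, y, x)
print(point_prob.shape, float(point_prob.max()))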
<SYSTEM_TASK:>
Hourly probability of exceeding a threshold based on model values within a specified radius of a point.
<END_TASK>
<USER_TASK:>
Description:
def neighborhood_probability(self, threshold, radius, sigmas=None):
"""
Hourly probability of exceeding a threshold based on model values within a specified radius of a point.
Args:
threshold (float): probability of exceeding this threshold
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects containing neighborhood probabilities for each forecast hour.
""" |
if sigmas is None:
sigmas = [0]
weights = disk(radius)
filtered_prob = []
for sigma in sigmas:
filtered_prob.append(EnsembleConsensus(np.zeros(self.data.shape[1:], dtype=np.float32),
"neighbor_prob_r_{0:d}_s_{1:d}".format(radius, sigma),
self.ensemble_name,
self.run_date, self.variable + "_{0:0.2f}".format(threshold),
self.start_date, self.end_date, ""))
thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
neighbor_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
for t in range(self.data.shape[1]):
for m in range(self.data.shape[0]):
thresh_data[self.data[m, t] >= threshold] = 1
maximized = fftconvolve(thresh_data, weights, mode="same")
maximized[maximized > 1] = 1
maximized[maximized < 1] = 0
neighbor_prob += fftconvolve(maximized, weights, mode="same")
neighbor_prob[neighbor_prob < 1] = 0
thresh_data[:] = 0
neighbor_prob /= (self.data.shape[0] * float(weights.sum()))
for s, sigma in enumerate(sigmas):
if sigma > 0:
filtered_prob[s].data[t] = gaussian_filter(neighbor_prob, sigma=sigma)
else:
filtered_prob[s].data[t] = neighbor_prob
neighbor_prob[:] = 0
return filtered_prob |
<SYSTEM_TASK:>
Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded.
<END_TASK>
<USER_TASK:>
Description:
def period_max_neighborhood_probability(self, threshold, radius, sigmas=None):
"""
Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded.
Args:
threshold (float): splitting threshold for probability calculations
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects
""" |
if sigmas is None:
sigmas = [0]
weights = disk(radius)
neighborhood_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
for m in range(self.data.shape[0]):
thresh_data[self.data[m].max(axis=0) >= threshold] = 1
maximized = fftconvolve(thresh_data, weights, mode="same")
maximized[maximized > 1] = 1
neighborhood_prob += fftconvolve(maximized, weights, mode="same")
neighborhood_prob[neighborhood_prob < 1] = 0
neighborhood_prob /= (self.data.shape[0] * float(weights.sum()))
consensus_probs = []
for sigma in sigmas:
if sigma > 0:
filtered_prob = gaussian_filter(neighborhood_prob, sigma=sigma)
else:
filtered_prob = neighborhood_prob
ec = EnsembleConsensus(filtered_prob,
"neighbor_prob_{0:02d}-hour_r_{1:d}_s_{2:d}".format(self.data.shape[1],
radius, sigma),
self.ensemble_name,
self.run_date, self.variable + "_{0:0.2f}".format(float(threshold)),
self.start_date, self.end_date, "")
consensus_probs.append(ec)
return consensus_probs |
<SYSTEM_TASK:>
Initializes netCDF file for writing
<END_TASK>
<USER_TASK:>
Description:
def init_file(self, filename, time_units="seconds since 1970-01-01T00:00"):
"""
Initializes netCDF file for writing
Args:
filename: Name of the netCDF file
time_units: Units for the time variable in format "<time> since <date string>"
Returns:
Dataset object
""" |
if os.access(filename, os.R_OK):
out_data = Dataset(filename, "r+")
else:
out_data = Dataset(filename, "w")
if len(self.data.shape) == 2:
for d, dim in enumerate(["y", "x"]):
out_data.createDimension(dim, self.data.shape[d])
else:
for d, dim in enumerate(["y", "x"]):
out_data.createDimension(dim, self.data.shape[d+1])
out_data.createDimension("time", len(self.times))
time_var = out_data.createVariable("time", "i8", ("time",))
time_var[:] = date2num(self.times.to_pydatetime(), time_units)
time_var.units = time_units
out_data.Conventions = "CF-1.6"
return out_data |
<SYSTEM_TASK:>
Outputs data to a netCDF file. If the file does not exist, it will be created. Otherwise, additional variables
<END_TASK>
<USER_TASK:>
Description:
def write_to_file(self, out_data):
"""
Outputs data to a netCDF file. If the file does not exist, it will be created. Otherwise, additional variables
are appended to the current file
Args:
out_data: netCDF4 Dataset object to which the variable is written (e.g., as returned by init_file)
""" |
full_var_name = self.consensus_type + "_" + self.variable
if "-hour" in self.consensus_type:
if full_var_name not in out_data.variables.keys():
var = out_data.createVariable(full_var_name, "f4", ("y", "x"), zlib=True,
least_significant_digit=3, shuffle=True)
else:
var = out_data.variables[full_var_name]
var.coordinates = "y x"
else:
if full_var_name not in out_data.variables.keys():
var = out_data.createVariable(full_var_name, "f4", ("time", "y", "x"), zlib=True,
least_significant_digit=3, shuffle=True)
else:
var = out_data.variables[full_var_name]
var.coordinates = "time y x"
var[:] = self.data
var.units = self.units
var.long_name = self.consensus_type + "_" + self.variable
return |
<SYSTEM_TASK:>
Restore the workspace to the given workspace_uuid.
<END_TASK>
<USER_TASK:>
Description:
def restore(self, workspace_uuid):
"""
Restore the workspace to the given workspace_uuid.
If workspace_uuid is None then create a new workspace and use it.
""" |
workspace = next((workspace for workspace in self.document_model.workspaces if workspace.uuid == workspace_uuid), None)
if workspace is None:
workspace = self.new_workspace()
self._change_workspace(workspace) |
<SYSTEM_TASK:>
Looks for a workspace with workspace_id.
<END_TASK>
<USER_TASK:>
Description:
def ensure_workspace(self, name, layout, workspace_id):
"""Looks for a workspace with workspace_id.
If none is found, create a new one, add it, and change to it.
""" |
workspace = next((workspace for workspace in self.document_model.workspaces if workspace.workspace_id == workspace_id), None)
if not workspace:
workspace = self.new_workspace(name=name, layout=layout, workspace_id=workspace_id)
self._change_workspace(workspace) |
<SYSTEM_TASK:>
Pose a dialog to name and create a workspace.
<END_TASK>
<USER_TASK:>
Description:
def create_workspace(self) -> None:
""" Pose a dialog to name and create a workspace. """ |
def create_clicked(text):
if text:
command = Workspace.CreateWorkspaceCommand(self, text)
command.perform()
self.document_controller.push_undo_command(command)
self.pose_get_string_message_box(caption=_("Enter a name for the workspace"), text=_("Workspace"),
accepted_fn=create_clicked, accepted_text=_("Create"),
message_box_id="create_workspace") |
<SYSTEM_TASK:>
Pose a dialog to rename the workspace.
<END_TASK>
<USER_TASK:>
Description:
def rename_workspace(self) -> None:
""" Pose a dialog to rename the workspace. """ |
def rename_clicked(text):
if len(text) > 0:
command = Workspace.RenameWorkspaceCommand(self, text)
command.perform()
self.document_controller.push_undo_command(command)
self.pose_get_string_message_box(caption=_("Enter new name for workspace"), text=self.__workspace.name,
accepted_fn=rename_clicked, accepted_text=_("Rename"),
message_box_id="rename_workspace") |
<SYSTEM_TASK:>
Pose a dialog to confirm removal then remove workspace.
<END_TASK>
<USER_TASK:>
Description:
def remove_workspace(self):
""" Pose a dialog to confirm removal then remove workspace. """ |
def confirm_clicked():
if len(self.document_model.workspaces) > 1:
command = Workspace.RemoveWorkspaceCommand(self)
command.perform()
self.document_controller.push_undo_command(command)
caption = _("Remove workspace named '{0}'?").format(self.__workspace.name)
self.pose_confirmation_message_box(caption, confirm_clicked, accepted_text=_("Remove Workspace"),
message_box_id="remove_workspace") |
<SYSTEM_TASK:>
Pose a dialog to name and clone a workspace.
<END_TASK>
<USER_TASK:>
Description:
def clone_workspace(self) -> None:
""" Pose a dialog to name and clone a workspace. """ |
def clone_clicked(text):
if text:
command = Workspace.CloneWorkspaceCommand(self, text)
command.perform()
self.document_controller.push_undo_command(command)
self.pose_get_string_message_box(caption=_("Enter a name for the workspace"), text=self.__workspace.name,
accepted_fn=clone_clicked, accepted_text=_("Clone"),
message_box_id="clone_workspace") |
<SYSTEM_TASK:>
Given a set of DistributedROC or DistributedReliability objects, this function performs a
<END_TASK>
<USER_TASK:>
Description:
def bootstrap(score_objs, n_boot=1000):
"""
Given a set of DistributedROC or DistributedReliability objects, this function performs a
bootstrap resampling of the objects and returns n_boot aggregations of them.
Args:
score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method
n_boot (int): Number of bootstrap samples
Returns:
An array of DistributedROC or DistributedReliability
""" |
all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)
return all_samples.sum(axis=1) |
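A hedged sketch of how `bootstrap` behaves, using a hypothetical `ToyScore` class whose `__add__` merges counts in place of DistributedROC or DistributedReliability.

import numpy as np

class ToyScore:
    def __init__(self, hits, total):
        self.hits, self.total = hits, total
    def __add__(self, other):
        return ToyScore(self.hits + other.hits, self.total + other.total)
    def rate(self):
        return self.hits / self.total

scores = [ToyScore(h, 10) for h in (3, 5, 7, 6, 4)]
boot = bootstrap(scores, n_boot=200)               # 200 aggregated ToyScore objects
rates = np.array([b.rate() for b in boot])
print(rates.mean(), np.percentile(rates, [2.5, 97.5]))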
<SYSTEM_TASK:>
Update the ROC curve with a set of forecasts and observations
<END_TASK>
<USER_TASK:>
Description:
def update(self, forecasts, observations):
"""
Update the ROC curve with a set of forecasts and observations
Args:
forecasts: 1D array of forecast values
observations: 1D array of observation values.
""" |
for t, threshold in enumerate(self.thresholds):
tp = np.count_nonzero((forecasts >= threshold) & (observations >= self.obs_threshold))
fp = np.count_nonzero((forecasts >= threshold) &
(observations < self.obs_threshold))
fn = np.count_nonzero((forecasts < threshold) &
(observations >= self.obs_threshold))
tn = np.count_nonzero((forecasts < threshold) &
(observations < self.obs_threshold))
self.contingency_tables.iloc[t] += [tp, fp, fn, tn] |
<SYSTEM_TASK:>
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other_roc):
"""
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object.
""" |
if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds):
self.contingency_tables += other_roc.contingency_tables
else:
print("Input table thresholds do not match.") |
<SYSTEM_TASK:>
Calculate the Probability of Detection and False Alarm Ratio in order to output a performance diagram.
<END_TASK>
<USER_TASK:>
Description:
def performance_curve(self):
"""
Calculate the Probability of Detection and False Alarm Ratio in order to output a performance diagram.
Returns:
pandas.DataFrame containing POD, FAR, and probability thresholds.
""" |
pod = self.contingency_tables["TP"] / (self.contingency_tables["TP"] + self.contingency_tables["FN"])
far = self.contingency_tables["FP"] / (self.contingency_tables["FP"] + self.contingency_tables["TP"])
far[(self.contingency_tables["FP"] + self.contingency_tables["TP"]) == 0] = np.nan
return pd.DataFrame({"POD": pod, "FAR": far, "Thresholds": self.thresholds},
columns=["POD", "FAR", "Thresholds"]) |
<SYSTEM_TASK:>
Calculate the maximum Critical Success Index across all probability thresholds
<END_TASK>
<USER_TASK:>
Description:
def max_csi(self):
"""
Calculate the maximum Critical Success Index across all probability thresholds
Returns:
The maximum CSI as a float
""" |
csi = self.contingency_tables["TP"] / (self.contingency_tables["TP"] + self.contingency_tables["FN"] +
self.contingency_tables["FP"])
return csi.max() |
<SYSTEM_TASK:>
Create an Array of ContingencyTable objects for each probability threshold.
<END_TASK>
<USER_TASK:>
Description:
def get_contingency_tables(self):
"""
Create an Array of ContingencyTable objects for each probability threshold.
Returns:
Array of ContingencyTable objects
""" |
return np.array([ContingencyTable(*ct) for ct in self.contingency_tables.values]) |
<SYSTEM_TASK:>
Read the DistributedROC string and parse the contingency table values from it.
<END_TASK>
<USER_TASK:>
Description:
def from_str(self, in_str):
"""
Read the DistributedROC string and parse the contingency table values from it.
Args:
in_str (str): The string output from the __str__ method
""" |
parts = in_str.split(";")
for part in parts:
var_name, value = part.split(":")
if var_name == "Obs_Threshold":
self.obs_threshold = float(value)
elif var_name == "Thresholds":
self.thresholds = np.array(value.split(), dtype=float)
self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns,
data=np.zeros((self.thresholds.size,
self.contingency_tables.columns.size)))
elif var_name in self.contingency_tables.columns:
self.contingency_tables[var_name] = np.array(value.split(), dtype=int) |
<SYSTEM_TASK:>
Update the statistics with a set of forecasts and observations.
<END_TASK>
<USER_TASK:>
Description:
def update(self, forecasts, observations):
"""
Update the statistics with a set of forecasts and observations.
Args:
forecasts (numpy.ndarray): Array of forecast probability values
observations (numpy.ndarray): Array of observation values
""" |
for t, threshold in enumerate(self.thresholds[:-1]):
self.frequencies.loc[t, "Positive_Freq"] += np.count_nonzero((threshold <= forecasts) &
(forecasts < self.thresholds[t+1]) &
(observations >= self.obs_threshold))
self.frequencies.loc[t, "Total_Freq"] += np.count_nonzero((threshold <= forecasts) &
(forecasts < self.thresholds[t+1])) |
<SYSTEM_TASK:>
Ingest another DistributedReliability and add its contents to the current object.
<END_TASK>
<USER_TASK:>
Description:
def merge(self, other_rel):
"""
Ingest another DistributedReliability and add its contents to the current object.
Args:
other_rel: a Distributed reliability object.
""" |
if other_rel.thresholds.size == self.thresholds.size and np.all(other_rel.thresholds == self.thresholds):
self.frequencies += other_rel.frequencies
else:
print("Input table thresholds do not match.") |
<SYSTEM_TASK:>
Calculates the reliability diagram statistics. The key columns are Bin_Start and Positive_Relative_Freq
<END_TASK>
<USER_TASK:>
Description:
def reliability_curve(self):
"""
Calculates the reliability diagram statistics. The key columns are Bin_Start and Positive_Relative_Freq
Returns:
pandas.DataFrame
""" |
total = self.frequencies["Total_Freq"].sum()
curve = pd.DataFrame(columns=["Bin_Start", "Bin_End", "Bin_Center",
"Positive_Relative_Freq", "Total_Relative_Freq"])
curve["Bin_Start"] = self.thresholds[:-1]
curve["Bin_End"] = self.thresholds[1:]
curve["Bin_Center"] = 0.5 * (self.thresholds[:-1] + self.thresholds[1:])
curve["Positive_Relative_Freq"] = self.frequencies["Positive_Freq"] / self.frequencies["Total_Freq"]
curve["Total_Relative_Freq"] = self.frequencies["Total_Freq"] / total
return curve |
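A hedged numpy illustration of the binning behind the reliability curve above: forecast probabilities are grouped into bins and the positive relative frequency is the fraction of positive observations within each bin. The data below are made up.

import numpy as np

thresholds = np.linspace(0, 1, 6)  # bin edges
forecasts = np.array([0.05, 0.15, 0.35, 0.55, 0.75, 0.95, 0.65, 0.25])
observations = np.array([0, 0, 1, 1, 1, 1, 0, 0])

pos_freq = np.zeros(thresholds.size - 1)
total_freq = np.zeros(thresholds.size - 1)
for t in range(thresholds.size - 1):
    in_bin = (thresholds[t] <= forecasts) & (forecasts < thresholds[t + 1])
    total_freq[t] = np.count_nonzero(in_bin)
    pos_freq[t] = np.count_nonzero(in_bin & (observations >= 1))
with np.errstate(invalid="ignore"):
    print(pos_freq / total_freq)  # positive relative frequency per bin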
<SYSTEM_TASK:>
Update the statistics with forecasts and observations.
<END_TASK>
<USER_TASK:>
Description:
def update(self, forecasts, observations):
"""
Update the statistics with forecasts and observations.
Args:
forecasts: 2D array of discrete cumulative distribution function values for each forecast, with one column per threshold.
observations: 1D array of observed values, or 2D array of observed CDFs with the same shape as forecasts.
""" |
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs_cdfs = observations
self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
self.errors["O"] += np.sum(obs_cdfs, axis=0)
self.num_forecasts += forecasts.shape[0] |
<SYSTEM_TASK:>
Calculates the continuous ranked probability score.
<END_TASK>
<USER_TASK:>
Description:
def crps(self):
"""
Calculates the continuous ranked probability score.
""" |
return np.sum(self.errors["F_2"].values - self.errors["F_O"].values * 2.0 + self.errors["O_2"].values) / \
(self.thresholds.size * self.num_forecasts) |
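A hedged check of the bookkeeping behind `crps`: accumulating F², F·O, and O² per threshold and combining them as F_2 - 2·F_O + O_2 reproduces the mean squared difference between forecast and observed CDFs. The data below are synthetic.

import numpy as np

thresholds = np.linspace(0, 1, 6)
rng = np.random.RandomState(0)
forecast_cdfs = np.sort(rng.uniform(size=(4, thresholds.size)), axis=1)
obs_values = rng.uniform(size=4)
obs_cdfs = (thresholds[None, :] >= obs_values[:, None]).astype(float)

f2 = np.sum(forecast_cdfs ** 2, axis=0)
fo = np.sum(forecast_cdfs * obs_cdfs, axis=0)
o2 = np.sum(obs_cdfs ** 2, axis=0)
crps_from_sums = np.sum(f2 - 2.0 * fo + o2) / (thresholds.size * forecast_cdfs.shape[0])
print(np.isclose(crps_from_sums, np.mean((forecast_cdfs - obs_cdfs) ** 2)))  # True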
<SYSTEM_TASK:>
Calculate the continuous ranked probability skill score from existing data.
<END_TASK>
<USER_TASK:>
Description:
def crpss(self):
"""
Calculate the continuous ranked probability skill score from existing data.
""" |
crps_f = self.crps()
crps_c = self.crps_climo()
return 1.0 - float(crps_f) / float(crps_c) |
<SYSTEM_TASK:>
Return whether the metadata value for the given key exists.
<END_TASK>
<USER_TASK:>
Description:
def has_metadata_value(metadata_source, key: str) -> bool:
"""Return whether the metadata value for the given key exists.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
""" |
desc = session_key_map.get(key)
if desc is not None:
d = getattr(metadata_source, "session_metadata", dict())
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None:
return desc['path'][-1] in d
desc = key_map.get(key)
if desc is not None:
d = getattr(metadata_source, "metadata", dict())
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None:
return desc['path'][-1] in d
return False
<SYSTEM_TASK:>
Delete the metadata value for the given key.
<END_TASK>
<USER_TASK:>
Description:
def delete_metadata_value(metadata_source, key: str) -> None:
"""Delete the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<dotted>.<group>.<attribute>' format followed
by the predefined keys. e.g. 'stem.session.instrument' or 'stm.camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
""" |
desc = session_key_map.get(key)
if desc is not None:
d0 = getattr(metadata_source, "session_metadata", dict())
d = d0
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None and desc['path'][-1] in d:
d.pop(desc['path'][-1], None)
metadata_source.session_metadata = d0
return
desc = key_map.get(key)
if desc is not None:
d0 = getattr(metadata_source, "metadata", dict())
d = d0
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None and desc['path'][-1] in d:
d.pop(desc['path'][-1], None)
metadata_source.metadata = d0
return |
<SYSTEM_TASK:>
Calculate the y-axis items dependent on the plot height.
<END_TASK>
<USER_TASK:>
Description:
def calculate_y_ticks(self, plot_height):
"""Calculate the y-axis items dependent on the plot height.""" |
calibrated_data_min = self.calibrated_data_min
calibrated_data_max = self.calibrated_data_max
calibrated_data_range = calibrated_data_max - calibrated_data_min
ticker = self.y_ticker
y_ticks = list()
for tick_value, tick_label in zip(ticker.values, ticker.labels):
if calibrated_data_range != 0.0:
y_tick = plot_height - plot_height * (tick_value - calibrated_data_min) / calibrated_data_range
else:
y_tick = plot_height - plot_height * 0.5
if y_tick >= 0 and y_tick <= plot_height:
y_ticks.append((y_tick, tick_label))
return y_ticks |
<SYSTEM_TASK:>
Calculate the x-axis items dependent on the plot width.
<END_TASK>
<USER_TASK:>
Description:
def calculate_x_ticks(self, plot_width):
"""Calculate the x-axis items dependent on the plot width.""" |
x_calibration = self.x_calibration
uncalibrated_data_left = self.__uncalibrated_left_channel
uncalibrated_data_right = self.__uncalibrated_right_channel
calibrated_data_left = x_calibration.convert_to_calibrated_value(uncalibrated_data_left) if x_calibration is not None else uncalibrated_data_left
calibrated_data_right = x_calibration.convert_to_calibrated_value(uncalibrated_data_right) if x_calibration is not None else uncalibrated_data_right
calibrated_data_left, calibrated_data_right = min(calibrated_data_left, calibrated_data_right), max(calibrated_data_left, calibrated_data_right)
graph_left, graph_right, tick_values, division, precision = Geometry.make_pretty_range(calibrated_data_left, calibrated_data_right)
drawn_data_width = self.drawn_right_channel - self.drawn_left_channel
x_ticks = list()
if drawn_data_width > 0.0:
for tick_value in tick_values:
label = nice_label(tick_value, precision)
data_tick = x_calibration.convert_from_calibrated_value(tick_value) if x_calibration else tick_value
x_tick = plot_width * (data_tick - self.drawn_left_channel) / drawn_data_width
if x_tick >= 0 and x_tick <= plot_width:
x_ticks.append((x_tick, label))
return x_ticks |
<SYSTEM_TASK:>
Size the canvas item to the proper height.
<END_TASK>
<USER_TASK:>
Description:
def size_to_content(self):
""" Size the canvas item to the proper height. """ |
new_sizing = self.copy_sizing()
new_sizing.minimum_height = 0
new_sizing.maximum_height = 0
axes = self.__axes
if axes and axes.is_valid:
if axes.x_calibration and axes.x_calibration.units:
new_sizing.minimum_height = self.font_size + 4
new_sizing.maximum_height = self.font_size + 4
self.update_sizing(new_sizing) |
<SYSTEM_TASK:>
Size the canvas item to the proper width, the maximum of any label.
<END_TASK>
<USER_TASK:>
Description:
def size_to_content(self, get_font_metrics_fn):
""" Size the canvas item to the proper width, the maximum of any label. """ |
new_sizing = self.copy_sizing()
new_sizing.minimum_width = 0
new_sizing.maximum_width = 0
axes = self.__axes
if axes and axes.is_valid:
# calculate the width based on the label lengths
font = "{0:d}px".format(self.font_size)
max_width = 0
y_range = axes.calibrated_data_max - axes.calibrated_data_min
label = axes.y_ticker.value_label(axes.calibrated_data_max + y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
label = axes.y_ticker.value_label(axes.calibrated_data_min - y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
new_sizing.minimum_width = max_width
new_sizing.maximum_width = max_width
self.update_sizing(new_sizing) |
<SYSTEM_TASK:>
Size the canvas item to the proper width.
<END_TASK>
<USER_TASK:>
Description:
def size_to_content(self):
""" Size the canvas item to the proper width. """ |
new_sizing = self.copy_sizing()
new_sizing.minimum_width = 0
new_sizing.maximum_width = 0
axes = self.__axes
if axes and axes.is_valid:
if axes.y_calibration and axes.y_calibration.units:
new_sizing.minimum_width = self.font_size + 4
new_sizing.maximum_width = self.font_size + 4
self.update_sizing(new_sizing) |
<SYSTEM_TASK:>
Load the content from a snippet file which exists in SNIPPETS_ROOT
<END_TASK>
<USER_TASK:>
Description:
def get_snippet_content(snippet_name, **format_kwargs):
""" Load the content from a snippet file which exists in SNIPPETS_ROOT """ |
filename = snippet_name + '.snippet'
snippet_file = os.path.join(SNIPPETS_ROOT, filename)
if not os.path.isfile(snippet_file):
raise ValueError('could not find snippet with name ' + filename)
ret = helpers.get_file_content(snippet_file)
if format_kwargs:
ret = ret.format(**format_kwargs)
return ret |
<SYSTEM_TASK:>
Update the display values. Called from display panel.
<END_TASK>
<USER_TASK:>
Description:
def update_display_properties(self, display_calibration_info, display_properties: typing.Mapping, display_layers: typing.Sequence[typing.Mapping]) -> None:
"""Update the display values. Called from display panel.
This method saves the display values and data and triggers an update. It should be as fast as possible.
As a layer, this canvas item will respond to the update by calling prepare_render on the layer's rendering
thread. Prepare render will call prepare_display which will construct new axes and update all of the constituent
canvas items such as the axes labels and the graph layers. Each will trigger its own update if its inputs have
changed.
The inefficiencies in this process are that the layer must re-render on each call to this function. There is
also a cost within the constituent canvas items to check whether the axes or their data has changed.
When the display is associated with a single data item, the data will be
""" |
# may be called from thread; prevent a race condition with closing.
with self.__closing_lock:
if self.__closed:
return
displayed_dimensional_scales = display_calibration_info.displayed_dimensional_scales
displayed_dimensional_calibrations = display_calibration_info.displayed_dimensional_calibrations
self.__data_scale = displayed_dimensional_scales[-1] if len(displayed_dimensional_scales) > 0 else 1
self.__displayed_dimensional_calibration = displayed_dimensional_calibrations[-1] if len(displayed_dimensional_calibrations) > 0 else Calibration.Calibration(scale=displayed_dimensional_scales[-1])
self.__intensity_calibration = display_calibration_info.displayed_intensity_calibration
self.__calibration_style = display_calibration_info.calibration_style
self.__y_min = display_properties.get("y_min")
self.__y_max = display_properties.get("y_max")
self.__y_style = display_properties.get("y_style", "linear")
self.__left_channel = display_properties.get("left_channel")
self.__right_channel = display_properties.get("right_channel")
self.__legend_position = display_properties.get("legend_position")
self.__display_layers = display_layers
if self.__display_values_list and len(self.__display_values_list) > 0:
self.__xdata_list = [display_values.display_data_and_metadata if display_values else None for display_values in self.__display_values_list]
xdata0 = self.__xdata_list[0]
if xdata0:
self.__update_frame(xdata0.metadata)
else:
self.__xdata_list = list()
# update the cursor info
self.__update_cursor_info()
# mark for update. prepare display will mark children for update if necessary.
self.update() |
<SYSTEM_TASK:>
Map the mouse to the 1-d position within the line graph.
<END_TASK>
<USER_TASK:>
Description:
def __update_cursor_info(self):
""" Map the mouse to the 1-d position within the line graph. """ |
if not self.delegate: # allow display to work without delegate
return
if self.__mouse_in and self.__last_mouse:
pos_1d = None
axes = self.__axes
line_graph_canvas_item = self.line_graph_canvas_item
if axes and axes.is_valid and line_graph_canvas_item:
mouse = self.map_to_canvas_item(self.__last_mouse, line_graph_canvas_item)
plot_rect = line_graph_canvas_item.canvas_bounds
if plot_rect.contains_point(mouse):
mouse = mouse - plot_rect.origin
x = float(mouse.x) / plot_rect.width
px = axes.drawn_left_channel + x * (axes.drawn_right_channel - axes.drawn_left_channel)
pos_1d = px,
self.delegate.cursor_changed(pos_1d) |
<SYSTEM_TASK:>
Identify storms in gridded model output and extract uniform sized patches around the storm centers of mass.
<END_TASK>
<USER_TASK:>
Description:
def find_model_patch_tracks(self):
"""
Identify storms in gridded model output and extract uniform sized patches around the storm centers of mass.
Returns:
List of tracked STObjects containing the extracted model storm patches.
""" |
self.model_grid.load_data()
tracked_model_objects = []
model_objects = []
if self.model_grid.data is None:
print("No model output found")
return tracked_model_objects
min_orig = self.model_ew.min_thresh
max_orig = self.model_ew.max_thresh
data_increment_orig = self.model_ew.data_increment
self.model_ew.min_thresh = 0
self.model_ew.data_increment = 1
self.model_ew.max_thresh = 100
for h, hour in enumerate(self.hours):
# Identify storms at each time step and apply size filter
print("Finding {0} objects for run {1} Hour: {2:02d}".format(self.ensemble_member,
self.run_date.strftime("%Y%m%d%H"), hour))
if self.mask is not None:
model_data = self.model_grid.data[h] * self.mask
else:
model_data = self.model_grid.data[h]
model_data[:self.patch_radius] = 0
model_data[-self.patch_radius:] = 0
model_data[:, :self.patch_radius] = 0
model_data[:, -self.patch_radius:] = 0
scaled_data = np.array(rescale_data(model_data, min_orig, max_orig))
hour_labels = label_storm_objects(scaled_data, "ew",
self.model_ew.min_thresh, self.model_ew.max_thresh,
min_area=self.size_filter, max_area=self.model_ew.max_size,
max_range=self.model_ew.delta, increment=self.model_ew.data_increment,
gaussian_sd=self.gaussian_window)
model_objects.extend(extract_storm_patches(hour_labels, model_data, self.model_grid.x,
self.model_grid.y, [hour],
dx=self.model_grid.dx,
patch_radius=self.patch_radius))
for model_obj in model_objects[-1]:
dims = model_obj.timesteps[-1].shape
if h > 0:
model_obj.estimate_motion(hour, self.model_grid.data[h-1], dims[1], dims[0])
del scaled_data
del model_data
del hour_labels
tracked_model_objects.extend(track_storms(model_objects, self.hours,
self.object_matcher.cost_function_components,
self.object_matcher.max_values,
self.object_matcher.weights))
self.model_ew.min_thresh = min_orig
self.model_ew.max_thresh = max_orig
self.model_ew.data_increment = data_increment_orig
return tracked_model_objects |
<SYSTEM_TASK:>
Identify objects from MRMS timesteps and link them together with object matching.
<END_TASK>
<USER_TASK:>
Description:
def find_mrms_tracks(self):
"""
Identify objects from MRMS timesteps and link them together with object matching.
Returns:
List of STObjects containing MESH track information.
""" |
obs_objects = []
tracked_obs_objects = []
if self.mrms_ew is not None:
self.mrms_grid.load_data()
if len(self.mrms_grid.data) != len(self.hours):
print('Less than 24 hours of observation data found')
return tracked_obs_objects
for h, hour in enumerate(self.hours):
mrms_data = np.zeros(self.mrms_grid.data[h].shape)
mrms_data[:] = np.array(self.mrms_grid.data[h])
mrms_data[mrms_data < 0] = 0
hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data,
self.gaussian_window)),
self.size_filter)
hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0
obj_slices = find_objects(hour_labels)
num_slices = len(obj_slices)
obs_objects.append([])
if num_slices > 0:
for sl in obj_slices:
obs_objects[-1].append(STObject(mrms_data[sl],
np.where(hour_labels[sl] > 0, 1, 0),
self.model_grid.x[sl],
self.model_grid.y[sl],
self.model_grid.i[sl],
self.model_grid.j[sl],
hour,
hour,
dx=self.model_grid.dx))
if h > 0:
dims = obs_objects[-1][-1].timesteps[0].shape
obs_objects[-1][-1].estimate_motion(hour, self.mrms_grid.data[h-1], dims[1], dims[0])
for h, hour in enumerate(self.hours):
past_time_objs = []
for obj in tracked_obs_objects:
if obj.end_time == hour - 1:
past_time_objs.append(obj)
if len(past_time_objs) == 0:
tracked_obs_objects.extend(obs_objects[h])
elif len(past_time_objs) > 0 and len(obs_objects[h]) > 0:
assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h], hour - 1, hour)
unpaired = list(range(len(obs_objects[h])))
for pair in assignments:
past_time_objs[pair[0]].extend(obs_objects[h][pair[1]])
unpaired.remove(pair[1])
if len(unpaired) > 0:
for up in unpaired:
tracked_obs_objects.append(obs_objects[h][up])
print("Tracked Obs Objects: {0:03d} Hour: {1:02d}".format(len(tracked_obs_objects), hour))
return tracked_obs_objects |
<SYSTEM_TASK:>
Match forecast and observed tracks.
<END_TASK>
<USER_TASK:>
Description:
def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
"""
Match forecast and observed tracks.
Args:
model_tracks: List of forecast STObject tracks.
obs_tracks: List of observed STObject tracks.
unique_matches: If True, use one-to-one track matching; otherwise use neighbor matching.
closest_matches: Passed through to the track matcher's match_tracks method.
Returns:
List of track pairings produced by the matcher.
""" |
if unique_matches:
pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)
else:
pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)
return pairings |
<SYSTEM_TASK:>
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm
<END_TASK>
<USER_TASK:>
Description:
def match_hail_sizes(model_tracks, obs_tracks, track_pairings):
"""
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm
track timestep. If the durations of the forecast and observed tracks differ, then interpolation is used for the
intermediate timesteps.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed STObjects
track_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks
""" |
unpaired = list(range(len(model_tracks)))
for p, pair in enumerate(track_pairings):
model_track = model_tracks[pair[0]]
unpaired.remove(pair[0])
obs_track = obs_tracks[pair[1]]
obs_hail_sizes = np.array([step[obs_track.masks[t] == 1].max()
for t, step in enumerate(obs_track.timesteps)])
if obs_track.times.size > 1 and model_track.times.size > 1:
normalized_obs_times = 1.0 / (obs_track.times.max() - obs_track.times.min())\
* (obs_track.times - obs_track.times.min())
normalized_model_times = 1.0 / (model_track.times.max() - model_track.times.min())\
* (model_track.times - model_track.times.min())
hail_interp = interp1d(normalized_obs_times, obs_hail_sizes, kind="nearest",
bounds_error=False, fill_value=0)
model_track.observations = hail_interp(normalized_model_times)
elif obs_track.times.size == 1:
model_track.observations = np.ones(model_track.times.shape) * obs_hail_sizes[0]
elif model_track.times.size == 1:
model_track.observations = np.array([obs_hail_sizes.max()])
print(pair[0], "obs", obs_hail_sizes)
print(pair[0], "model", model_track.observations)
for u in unpaired:
model_tracks[u].observations = np.zeros(model_tracks[u].times.shape) |
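A hedged, self-contained illustration of the nearest-neighbor time interpolation used above when the forecast and observed track durations differ; the times and hail sizes are made up.

import numpy as np
from scipy.interpolate import interp1d

obs_times = np.array([0, 1, 2, 3])
obs_hail_sizes = np.array([10.0, 25.0, 40.0, 30.0])
model_times = np.array([0, 1, 2, 3, 4, 5])

norm_obs = (obs_times - obs_times.min()) / (obs_times.max() - obs_times.min())
norm_model = (model_times - model_times.min()) / (model_times.max() - model_times.min())
hail_interp = interp1d(norm_obs, obs_hail_sizes, kind="nearest",
                       bounds_error=False, fill_value=0)
print(hail_interp(norm_model))  # observed sizes mapped onto the model timesteps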
<SYSTEM_TASK:>
Calculates spatial and temporal translation errors between matched
<END_TASK>
<USER_TASK:>
Description:
def calc_track_errors(model_tracks, obs_tracks, track_pairings):
"""
Calculates spatial and temporal translation errors between matched
forecast and observed tracks.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed track STObjects
track_pairings: List of tuples pairing forecast and observed tracks.
Returns:
pandas DataFrame containing different track errors
""" |
columns = ['obs_track_id',
'translation_error_x',
'translation_error_y',
'start_time_difference',
'end_time_difference',
]
track_errors = pd.DataFrame(index=list(range(len(model_tracks))),
columns=columns)
for p, pair in enumerate(track_pairings):
model_track = model_tracks[pair[0]]
if type(pair[1]) in [int, np.int64]:
obs_track = obs_tracks[pair[1]]
else:
obs_track = obs_tracks[pair[1][0]]
model_com = model_track.center_of_mass(model_track.start_time)
obs_com = obs_track.center_of_mass(obs_track.start_time)
track_errors.loc[pair[0], 'obs_track_id'] = pair[1] if type(pair[1]) in [int, np.int64] else pair[1][0]
track_errors.loc[pair[0], 'translation_error_x'] = model_com[0] - obs_com[0]
track_errors.loc[pair[0], 'translation_error_y'] = model_com[1] - obs_com[1]
track_errors.loc[pair[0], 'start_time_difference'] = model_track.start_time - obs_track.start_time
track_errors.loc[pair[0], 'end_time_difference'] = model_track.end_time - obs_track.end_time
return track_errors |
<SYSTEM_TASK:>
Return the text display for the given tree node. Based on number of keys associated with tree node.
<END_TASK>
<USER_TASK:>
Description:
def __display_for_tree_node(self, tree_node):
""" Return the text display for the given tree node. Based on number of keys associated with tree node. """ |
keys = tree_node.keys
if len(keys) == 1:
return "{0} ({1})".format(tree_node.keys[-1], tree_node.count)
elif len(keys) == 2:
months = (_("January"), _("February"), _("March"), _("April"), _("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December"))
return "{0} ({1})".format(months[max(min(tree_node.keys[1]-1, 11), 0)], tree_node.count)
else:
weekdays = (_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), _("Friday"), _("Saturday"), _("Sunday"))
date = datetime.date(tree_node.keys[0], tree_node.keys[1], tree_node.keys[2])
return "{0} - {1} ({2})".format(tree_node.keys[2], weekdays[date.weekday()], tree_node.count) |
<SYSTEM_TASK:>
Called from the root tree node when a new node is inserted into tree. This method creates properties
<END_TASK>
<USER_TASK:>
Description:
def __insert_child(self, parent_tree_node, index, tree_node):
"""
Called from the root tree node when a new node is inserted into tree. This method creates properties
to represent the node for display and inserts it into the item model controller.
""" |
# manage the item model
parent_item = self.__mapping[id(parent_tree_node)]
self.item_model_controller.begin_insert(index, index, parent_item.row, parent_item.id)
properties = {
"display": self.__display_for_tree_node(tree_node),
"tree_node": tree_node # used for removal and other lookup
}
item = self.item_model_controller.create_item(properties)
parent_item.insert_child(index, item)
self.__mapping[id(tree_node)] = item
self.item_model_controller.end_insert() |
<SYSTEM_TASK:>
Called from the root tree node when a node is removed from the tree. This method removes it from the
<END_TASK>
<USER_TASK:>
Description:
def __remove_child(self, parent_tree_node, index):
"""
Called from the root tree node when a node is removed from the tree. This method removes it from the
item model controller.
""" |
# get parent and item
parent_item = self.__mapping[id(parent_tree_node)]
# manage the item model
self.item_model_controller.begin_remove(index, index, parent_item.row, parent_item.id)
child_item = parent_item.children[index]
parent_item.remove_child(child_item)
self.__mapping.pop(id(child_item.data["tree_node"]))
self.item_model_controller.end_remove() |
<SYSTEM_TASK:>
Update all tree item displays if needed. Usually for count updates.
<END_TASK>
<USER_TASK:>
Description:
def update_all_nodes(self):
""" Update all tree item displays if needed. Usually for count updates. """ |
item_model_controller = self.item_model_controller
if item_model_controller:
if self.__node_counts_dirty:
for item in self.__mapping.values():
if "tree_node" in item.data: # don't update the root node
tree_node = item.data["tree_node"]
item.data["display"] = self.__display_for_tree_node(tree_node)
item_model_controller.data_changed(item.row, item.parent.row, item.parent.id)
self.__node_counts_dirty = False |
<SYSTEM_TASK:>
Called to handle selection changes in the tree widget.
<END_TASK>
<USER_TASK:>
Description:
def date_browser_selection_changed(self, selected_indexes):
"""
Called to handle selection changes in the tree widget.
This method should be connected to the on_selection_changed event. This method builds a list
of keys represented by all selected items. It then provides date_filter to filter data items
based on the list of keys. It then sets the filter into the document controller.
:param selected_indexes: The selected indexes
:type selected_indexes: list of ints
""" |
partial_date_filters = list()
for index, parent_row, parent_id in selected_indexes:
item_model_controller = self.item_model_controller
tree_node = item_model_controller.item_value("tree_node", index, parent_id)
partial_date_filters.append(ListModel.PartialDateFilter("created_local", *tree_node.keys))
if len(partial_date_filters) > 0:
self.__date_filter = ListModel.OrFilter(partial_date_filters)
else:
self.__date_filter = None
self.__update_filter() |
<SYSTEM_TASK:>
Called to handle changes to the text filter.
<END_TASK>
<USER_TASK:>
Description:
def text_filter_changed(self, text):
"""
Called to handle changes to the text filter.
:param text: The text for the filter.
""" |
text = text.strip() if text else None
if text is not None:
self.__text_filter = ListModel.TextFilter("text_for_filter", text)
else:
self.__text_filter = None
self.__update_filter() |
<SYSTEM_TASK:>
Create a combined filter. Set the resulting filter into the document controller.
<END_TASK>
<USER_TASK:>
Description:
def __update_filter(self):
"""
Create a combined filter. Set the resulting filter into the document controller.
""" |
filters = list()
if self.__date_filter:
filters.append(self.__date_filter)
if self.__text_filter:
filters.append(self.__text_filter)
self.document_controller.display_filter = ListModel.AndFilter(filters) |
<SYSTEM_TASK:>
Return the keys associated with this node by adding its key and then adding parent keys recursively.
<END_TASK>
<USER_TASK:>
Description:
def __get_keys(self):
""" Return the keys associated with this node by adding its key and then adding parent keys recursively. """ |
keys = list()
tree_node = self
while tree_node is not None and tree_node.key is not None:
keys.insert(0, tree_node.key)
tree_node = tree_node.parent
return keys |
<SYSTEM_TASK:>
From a 2D grid or time series of 2D grids, this method labels storm objects with either the Enhanced Watershed
<END_TASK>
<USER_TASK:>
Description:
def label_storm_objects(data, method, min_intensity, max_intensity, min_area=1, max_area=100, max_range=1,
increment=1, gaussian_sd=0):
"""
From a 2D grid or time series of 2D grids, this method labels storm objects with either the Enhanced Watershed
or Hysteresis methods.
Args:
data: the gridded data to be labeled. Should be a 2D numpy array in (y, x) coordinate order or a 3D numpy array
in (time, y, x) coordinate order
method: "ew" or "watershed" for Enhanced Watershed or "hyst" for hysteresis
min_intensity: Minimum intensity threshold for gridpoints contained within any objects
max_intensity: For watershed, any points above max_intensity are considered as the same value as max intensity.
For hysteresis, all objects have to contain at least 1 pixel that equals or exceeds this value
min_area: (default 1) The minimum area of any object in pixels.
max_area: (default 100) The area threshold in pixels at which the enhanced watershed ends growth. Object area
may exceed this threshold if the pixels at the last watershed level exceed the object area.
max_range: Maximum difference between the maximum and minimum value in an enhanced watershed object before
growth is stopped.
increment: Discretization increment for the enhanced watershed
gaussian_sd: Standard deviation of Gaussian filter applied to data
Returns:
label_grid: an ndarray with the same shape as data in which each pixel is labeled with a positive integer value.
""" |
if method.lower() in ["ew", "watershed"]:
labeler = EnhancedWatershed(min_intensity, increment, max_intensity, max_area, max_range)
else:
labeler = Hysteresis(min_intensity, max_intensity)
if len(data.shape) == 2:
label_grid = labeler.label(gaussian_filter(data, gaussian_sd))
label_grid[data < min_intensity] = 0
if min_area > 1:
label_grid = labeler.size_filter(label_grid, min_area)
else:
label_grid = np.zeros(data.shape, dtype=int)
for t in range(data.shape[0]):
label_grid[t] = labeler.label(gaussian_filter(data[t], gaussian_sd))
label_grid[t][data[t] < min_intensity] = 0
if min_area > 1:
label_grid[t] = labeler.size_filter(label_grid[t], min_area)
return label_grid |
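A hedged usage sketch for `label_storm_objects` on a synthetic field, assuming the EnhancedWatershed/Hysteresis classes it references are importable from the surrounding package; the field and thresholds are illustrative only.

import numpy as np

field = np.zeros((100, 100), dtype=np.float32)
field[30:40, 30:40] = 45.0
field[60:70, 65:75] = 55.0
labels = label_storm_objects(field, "hyst", min_intensity=30.0, max_intensity=50.0,
                             min_area=4, gaussian_sd=1)
print(np.unique(labels))  # 0 plus one positive label per storm meeting both thresholds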
<SYSTEM_TASK:>
After storms are labeled, this method extracts the storm objects from the grid and places them into STObjects.
<END_TASK>
<USER_TASK:>
Description:
def extract_storm_objects(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, obj_buffer=0):
"""
After storms are labeled, this method extracts the storm objects from the grid and places them into STObjects.
The STObjects contain intensity, location, and shape information about each storm at each timestep.
Args:
label_grid: 2D or 3D array output by label_storm_objects.
data: 2D or 3D array used as input to label_storm_objects.
x_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.
y_grid: 2D array of y-coordinate data.
times: List or array of time values, preferably as integers
dx: grid spacing in same units as x_grid and y_grid.
dt: period elapsed between times
obj_buffer: number of extra pixels beyond bounding box of object to store in each STObject
Returns:
storm_objects: list of lists containing STObjects identified at each time.
""" |
storm_objects = []
if len(label_grid.shape) == 3:
ij_grid = np.indices(label_grid.shape[1:])
for t, time in enumerate(times):
storm_objects.append([])
object_slices = list(find_objects(label_grid[t], label_grid[t].max()))
if len(object_slices) > 0:
for o, obj_slice in enumerate(object_slices):
if obj_buffer > 0:
                        obj_slice_buff = tuple(slice(np.maximum(0, osl.start - obj_buffer),
                                                     np.minimum(osl.stop + obj_buffer, label_grid.shape[l + 1]))
                                               for l, osl in enumerate(obj_slice))
else:
obj_slice_buff = obj_slice
storm_objects[-1].append(STObject(data[t][obj_slice_buff],
np.where(label_grid[t][obj_slice_buff] == o + 1, 1, 0),
x_grid[obj_slice_buff],
y_grid[obj_slice_buff],
ij_grid[0][obj_slice_buff],
ij_grid[1][obj_slice_buff],
time,
time,
dx=dx,
step=dt))
if t > 0:
dims = storm_objects[-1][-1].timesteps[0].shape
storm_objects[-1][-1].estimate_motion(time, data[t - 1], dims[1], dims[0])
else:
ij_grid = np.indices(label_grid.shape)
storm_objects.append([])
object_slices = list(find_objects(label_grid, label_grid.max()))
if len(object_slices) > 0:
for o, obj_slice in enumerate(object_slices):
if obj_buffer > 0:
                    obj_slice_buff = tuple(slice(np.maximum(0, osl.start - obj_buffer),
                                                 np.minimum(osl.stop + obj_buffer, label_grid.shape[l]))
                                           for l, osl in enumerate(obj_slice))
else:
obj_slice_buff = obj_slice
storm_objects[-1].append(STObject(data[obj_slice_buff],
np.where(label_grid[obj_slice_buff] == o + 1, 1, 0),
x_grid[obj_slice_buff],
y_grid[obj_slice_buff],
ij_grid[0][obj_slice_buff],
ij_grid[1][obj_slice_buff],
times,
times,
dx=dx,
step=dt))
return storm_objects |
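Continuing in the same spirit, a hypothetical single-time-step sketch that chains labeling and object extraction; the grid, spacing, and thresholds are all made up for illustration:
import numpy as np
# One time step on a uniform grid with 1-unit spacing.
y_grid, x_grid = np.meshgrid(np.arange(100.0), np.arange(100.0), indexing="ij")
data = 50 * np.exp(-((x_grid - 40) ** 2 + (y_grid - 55) ** 2) / 60.0)[np.newaxis]  # shape (1, 100, 100)
times = np.array([0])
label_grid = label_storm_objects(data, "hyst", min_intensity=25, max_intensity=40)
storm_objects = extract_storm_objects(label_grid, data, x_grid, y_grid, times, dx=1, dt=1)
print(len(storm_objects[0]), "storm objects at time", times[0])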
<SYSTEM_TASK:>
After storms are labeled, this method extracts boxes of equal size centered on each storm from the grid and places
<END_TASK>
<USER_TASK:>
Description:
def extract_storm_patches(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, patch_radius=16):
"""
After storms are labeled, this method extracts boxes of equal size centered on each storm from the grid and places
them into STObjects. The STObjects contain intensity, location, and shape information about each storm
at each timestep.
Args:
label_grid: 2D or 3D array output by label_storm_objects.
data: 2D or 3D array used as input to label_storm_objects.
x_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.
y_grid: 2D array of y-coordinate data.
times: List or array of time values, preferably as integers
dx: grid spacing in same units as x_grid and y_grid.
dt: period elapsed between times
patch_radius: Number of grid points from center of mass to extract
Returns:
storm_objects: list of lists containing STObjects identified at each time.
""" |
storm_objects = []
if len(label_grid.shape) == 3:
ij_grid = np.indices(label_grid.shape[1:])
for t, time in enumerate(times):
storm_objects.append([])
# object_slices = find_objects(label_grid[t], label_grid[t].max())
centers = list(center_of_mass(data[t], labels=label_grid[t], index=np.arange(1, label_grid[t].max() + 1)))
if len(centers) > 0:
for o, center in enumerate(centers):
int_center = np.round(center).astype(int)
                    obj_slice_buff = (slice(int_center[0] - patch_radius, int_center[0] + patch_radius),
                                      slice(int_center[1] - patch_radius, int_center[1] + patch_radius))
storm_objects[-1].append(STObject(data[t][obj_slice_buff],
np.where(label_grid[t][obj_slice_buff] == o + 1, 1, 0),
x_grid[obj_slice_buff],
y_grid[obj_slice_buff],
ij_grid[0][obj_slice_buff],
ij_grid[1][obj_slice_buff],
time,
time,
dx=dx,
step=dt))
if t > 0:
dims = storm_objects[-1][-1].timesteps[0].shape
storm_objects[-1][-1].estimate_motion(time, data[t - 1], dims[1], dims[0])
else:
ij_grid = np.indices(label_grid.shape)
storm_objects.append([])
centers = list(center_of_mass(data, labels=label_grid, index=np.arange(1, label_grid.max() + 1)))
if len(centers) > 0:
for o, center in enumerate(centers):
int_center = np.round(center).astype(int)
obj_slice_buff = (slice(int_center[0] - patch_radius, int_center[0] + patch_radius),
slice(int_center[1] - patch_radius, int_center[1] + patch_radius))
storm_objects[-1].append(STObject(data[obj_slice_buff],
np.where(label_grid[obj_slice_buff] == o + 1, 1, 0),
x_grid[obj_slice_buff],
y_grid[obj_slice_buff],
ij_grid[0][obj_slice_buff],
ij_grid[1][obj_slice_buff],
times[0],
times[0],
dx=dx,
step=dt))
return storm_objects |
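The patch-based variant takes the same inputs; a hypothetical sketch extracting fixed 32x32 windows (patch_radius=16) around each labeled storm, with the inputs built the same way as in the sketch above:
import numpy as np
y_grid, x_grid = np.meshgrid(np.arange(100.0), np.arange(100.0), indexing="ij")
data = 50 * np.exp(-((x_grid - 40) ** 2 + (y_grid - 55) ** 2) / 60.0)[np.newaxis]
times = np.array([0])
label_grid = label_storm_objects(data, "hyst", min_intensity=25, max_intensity=40)
patches = extract_storm_patches(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, patch_radius=16)
print(patches[0][0].timesteps[0].shape)  # expected (32, 32) for objects away from the grid edge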
<SYSTEM_TASK:>
Given the output of extract_storm_objects, this method tracks storms through time and merges individual
<END_TASK>
<USER_TASK:>
Description:
def track_storms(storm_objects, times, distance_components, distance_maxima, distance_weights, tracked_objects=None):
"""
Given the output of extract_storm_objects, this method tracks storms through time and merges individual
STObjects into a set of tracks.
Args:
storm_objects: list of list of STObjects that have not been tracked.
times: List of times associated with each set of STObjects
distance_components: list of function objects that make up components of distance function
distance_maxima: array of maximum values for each distance for normalization purposes
distance_weights: weight given to each component of the distance function. Should add to 1.
tracked_objects: List of STObjects that have already been tracked.
Returns:
        tracked_objects: Updated list of STObject tracks with each current object either merged into an existing track or appended as a new track.
""" |
obj_matcher = ObjectMatcher(distance_components, distance_weights, distance_maxima)
if tracked_objects is None:
tracked_objects = []
for t, time in enumerate(times):
past_time_objects = []
for obj in tracked_objects:
if obj.end_time == time - obj.step:
past_time_objects.append(obj)
if len(past_time_objects) == 0:
tracked_objects.extend(storm_objects[t])
elif len(past_time_objects) > 0 and len(storm_objects[t]) > 0:
assignments = obj_matcher.match_objects(past_time_objects, storm_objects[t], times[t-1], times[t])
unpaired = list(range(len(storm_objects[t])))
for pair in assignments:
past_time_objects[pair[0]].extend(storm_objects[t][pair[1]])
unpaired.remove(pair[1])
if len(unpaired) > 0:
for up in unpaired:
tracked_objects.append(storm_objects[t][up])
return tracked_objects |
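A sketch of wiring the component distance functions defined below into a tracking call; the component choice, maxima, and weights are purely illustrative, and storm_objects and times are assumed to come from extract_storm_objects as in the earlier sketches:
import numpy as np
# Each component is scaled to [0, 1] by its maximum; the weights should sum to 1.
distance_components = [shifted_centroid_distance, closest_distance]
distance_maxima = np.array([24.0, 12.0])
distance_weights = np.array([0.6, 0.4])
tracked = track_storms(storm_objects, times, distance_components, distance_maxima, distance_weights)
print(len(tracked), "tracks")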
<SYSTEM_TASK:>
Euclidean distance between the centroids of item_a and item_b.
<END_TASK>
<USER_TASK:>
Description:
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
"""
Euclidean distance between the centroids of item_a and item_b.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
ax, ay = item_a.center_of_mass(time_a)
bx, by = item_b.center_of_mass(time_b)
return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value) |
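A purely illustrative numeric check of the normalization: centroids 3 grid units apart in x and 4 in y are 5 units apart, so with max_value=10 the function returns 0.5, and any separation of 10 or more saturates at 1.0.
import numpy as np
d = np.sqrt(3 ** 2 + 4 ** 2)             # 5.0
print(np.minimum(d, 10) / float(10))     # 0.5
print(np.minimum(17.0, 10) / float(10))  # 1.0 once the separation reaches max_value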
<SYSTEM_TASK:>
Centroid distance with motion corrections.
<END_TASK>
<USER_TASK:>
Description:
def shifted_centroid_distance(item_a, time_a, item_b, time_b, max_value):
"""
Centroid distance with motion corrections.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
ax, ay = item_a.center_of_mass(time_a)
bx, by = item_b.center_of_mass(time_b)
if time_a < time_b:
bx = bx - item_b.u
by = by - item_b.v
else:
ax = ax - item_a.u
ay = ay - item_a.v
return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value) |
<SYSTEM_TASK:>
Euclidean distance between the pixels in item_a and item_b closest to each other.
<END_TASK>
<USER_TASK:>
Description:
def closest_distance(item_a, time_a, item_b, time_b, max_value):
"""
Euclidean distance between the pixels in item_a and item_b closest to each other.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
return np.minimum(item_a.closest_distance(time_a, item_b, time_b), max_value) / float(max_value) |
<SYSTEM_TASK:>
Calculate differences in the properties of ellipses fitted to each object.
<END_TASK>
<USER_TASK:>
Description:
def ellipse_distance(item_a, time_a, item_b, time_b, max_value):
"""
Calculate differences in the properties of ellipses fitted to each object.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
ts = np.array([0, np.pi])
ell_a = item_a.get_ellipse_model(time_a)
ell_b = item_b.get_ellipse_model(time_b)
ends_a = ell_a.predict_xy(ts)
ends_b = ell_b.predict_xy(ts)
distances = np.sqrt((ends_a[:, 0:1] - ends_b[:, 0:1].T) ** 2 + (ends_a[:, 1:] - ends_b[:, 1:].T) ** 2)
return np.minimum(distances[0, 1], max_value) / float(max_value) |
<SYSTEM_TASK:>
Percentage of pixels in each object that do not overlap with the other object
<END_TASK>
<USER_TASK:>
Description:
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
"""
Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value) |
<SYSTEM_TASK:>
RMS difference in maximum intensity
<END_TASK>
<USER_TASK:>
Description:
def max_intensity(item_a, time_a, item_b, time_b, max_value):
"""
RMS difference in maximum intensity
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
intensity_a = item_a.max_intensity(time_a)
intensity_b = item_b.max_intensity(time_b)
diff = np.sqrt((intensity_a - intensity_b) ** 2)
return np.minimum(diff, max_value) / float(max_value) |
<SYSTEM_TASK:>
RMS Difference in object areas.
<END_TASK>
<USER_TASK:>
Description:
def area_difference(item_a, time_a, item_b, time_b, max_value):
"""
RMS Difference in object areas.
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
size_a = item_a.size(time_a)
size_b = item_b.size(time_b)
diff = np.sqrt((size_a - size_b) ** 2)
return np.minimum(diff, max_value) / float(max_value) |
<SYSTEM_TASK:>
RMS difference in the minimum distances from the centroids of one track to the centroids of another track
<END_TASK>
<USER_TASK:>
Description:
def mean_minimum_centroid_distance(item_a, item_b, max_value):
"""
RMS difference in the minimum distances from the centroids of one track to the centroids of another track
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
centroids_a = np.array([item_a.center_of_mass(t) for t in item_a.times])
centroids_b = np.array([item_b.center_of_mass(t) for t in item_b.times])
distance_matrix = (centroids_a[:, 0:1] - centroids_b.T[0:1]) ** 2 + (centroids_a[:, 1:] - centroids_b.T[1:]) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
return np.minimum(mean_min_distances, max_value) / float(max_value) |
<SYSTEM_TASK:>
Calculate the mean time difference among the time steps in each object.
<END_TASK>
<USER_TASK:>
Description:
def mean_min_time_distance(item_a, item_b, max_value):
"""
Calculate the mean time difference among the time steps in each object.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
times_a = item_a.times.reshape((item_a.times.size, 1))
times_b = item_b.times.reshape((1, item_b.times.size))
distance_matrix = (times_a - times_b) ** 2
mean_min_distances = np.sqrt(distance_matrix.min(axis=0).mean() + distance_matrix.min(axis=1).mean())
return np.minimum(mean_min_distances, max_value) / float(max_value) |
<SYSTEM_TASK:>
Distance between the centroids of the first step in each object.
<END_TASK>
<USER_TASK:>
Description:
def start_centroid_distance(item_a, item_b, max_value):
"""
Distance between the centroids of the first step in each object.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
start_a = item_a.center_of_mass(item_a.times[0])
start_b = item_b.center_of_mass(item_b.times[0])
start_distance = np.sqrt((start_a[0] - start_b[0]) ** 2 + (start_a[1] - start_b[1]) ** 2)
return np.minimum(start_distance, max_value) / float(max_value) |
<SYSTEM_TASK:>
Absolute difference between the starting times of each item.
<END_TASK>
<USER_TASK:>
Description:
def start_time_distance(item_a, item_b, max_value):
"""
Absolute difference between the starting times of each item.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
start_time_diff = np.abs(item_a.times[0] - item_b.times[0])
return np.minimum(start_time_diff, max_value) / float(max_value) |
<SYSTEM_TASK:>
Absolute difference in the duration of two items
<END_TASK>
<USER_TASK:>
Description:
def duration_distance(item_a, item_b, max_value):
"""
Absolute difference in the duration of two items
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
duration_a = item_a.times.size
duration_b = item_b.times.size
return np.minimum(np.abs(duration_a - duration_b), max_value) / float(max_value) |
<SYSTEM_TASK:>
Absolute difference in the means of the areas of each track over time.
<END_TASK>
<USER_TASK:>
Description:
def mean_area_distance(item_a, item_b, max_value):
"""
Absolute difference in the means of the areas of each track over time.
Args:
item_a: STObject from the first set in TrackMatcher
item_b: STObject from the second set in TrackMatcher
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
""" |
mean_area_a = np.mean([item_a.size(t) for t in item_a.times])
mean_area_b = np.mean([item_b.size(t) for t in item_b.times])
return np.abs(mean_area_a - mean_area_b) / float(max_value) |
<SYSTEM_TASK:>
Match two sets of objects at particular times.
<END_TASK>
<USER_TASK:>
Description:
def match_objects(self, set_a, set_b, time_a, time_b):
"""
Match two sets of objects at particular times.
Args:
set_a: list of STObjects
set_b: list of STObjects
time_a: time at which set_a is being evaluated for matching
time_b: time at which set_b is being evaluated for matching
Returns:
List of tuples containing (set_a index, set_b index) for each match
""" |
costs = self.cost_matrix(set_a, set_b, time_a, time_b) * 100
min_row_costs = costs.min(axis=1)
min_col_costs = costs.min(axis=0)
good_rows = np.where(min_row_costs < 100)[0]
good_cols = np.where(min_col_costs < 100)[0]
assignments = []
if len(good_rows) > 0 and len(good_cols) > 0:
munk = Munkres()
initial_assignments = munk.compute(costs[tuple(np.meshgrid(good_rows, good_cols, indexing='ij'))].tolist())
initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
for a in initial_assignments:
if costs[a[0], a[1]] < 100:
assignments.append(a)
return assignments |
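The cutoff at 100 corresponds to a normalized total distance of 1.0: pairs whose cost reaches the maximum are excluded both before and after the Hungarian assignment. A standalone sketch of the same gating pattern using the munkres package, with a made-up cost matrix:
import numpy as np
from munkres import Munkres
costs = np.array([[ 12.,  95., 100.],
                  [100.,  30., 100.]])
good_rows = np.where(costs.min(axis=1) < 100)[0]
good_cols = np.where(costs.min(axis=0) < 100)[0]
sub_costs = costs[np.ix_(good_rows, good_cols)]
pairs = Munkres().compute(sub_costs.tolist())
matches = [(int(good_rows[r]), int(good_cols[c])) for r, c in pairs if sub_costs[r, c] < 100]
print(matches)  # [(0, 0), (1, 1)]; column 2 never enters the assignment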
<SYSTEM_TASK:>
Calculate total cost function between two items.
<END_TASK>
<USER_TASK:>
Description:
def total_cost_function(self, item_a, item_b, time_a, time_b):
"""
Calculate total cost function between two items.
Args:
item_a: STObject
item_b: STObject
time_a: Timestep in item_a at which cost function is evaluated
time_b: Timestep in item_b at which cost function is evaluated
Returns:
The total weighted distance between item_a and item_b
""" |
distances = np.zeros(len(self.weights))
for c, component in enumerate(self.cost_function_components):
distances[c] = component(item_a, time_a, item_b, time_b, self.max_values[c])
total_distance = np.sum(self.weights * distances)
return total_distance |
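As a concrete illustration of the weighted sum, suppose two components return normalized distances of 0.2 and 0.5 with weights 0.6 and 0.4; the total distance is 0.6 * 0.2 + 0.4 * 0.5 = 0.32. The same arithmetic in code (values hypothetical):
import numpy as np
weights = np.array([0.6, 0.4])
component_distances = np.array([0.2, 0.5])    # each already scaled to [0, 1]
print(np.sum(weights * component_distances))  # 0.32 (up to floating-point rounding)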
<SYSTEM_TASK:>
Return the variable specifier for this variable.
<END_TASK>
<USER_TASK:>
Description:
def variable_specifier(self) -> dict:
"""Return the variable specifier for this variable.
The specifier can be used to lookup the value of this variable in a computation context.
""" |
if self.value_type is not None:
return {"type": "variable", "version": 1, "uuid": str(self.uuid), "x-name": self.name, "x-value": self.value}
else:
return self.specifier |
<SYSTEM_TASK:>
Return an object with a value property and a changed_event.
<END_TASK>
<USER_TASK:>
Description:
def bound_variable(self):
"""Return an object with a value property and a changed_event.
The value property returns the value of the variable. The changed_event is fired
whenever the value changes.
""" |
class BoundVariable:
def __init__(self, variable):
self.__variable = variable
self.changed_event = Event.Event()
self.needs_rebind_event = Event.Event()
def property_changed(key):
if key == "value":
self.changed_event.fire()
self.__variable_property_changed_listener = variable.property_changed_event.listen(property_changed)
@property
def value(self):
return self.__variable.value
def close(self):
self.__variable_property_changed_listener.close()
self.__variable_property_changed_listener = None
return BoundVariable(self) |
<SYSTEM_TASK:>
Resolve the object specifier.
<END_TASK>
<USER_TASK:>
Description:
def resolve_object_specifier(self, object_specifier, secondary_specifier=None, property_name=None, objects_model=None):
"""Resolve the object specifier.
First lookup the object specifier in the enclosing computation. If it's not found,
then lookup in the computation's context. Otherwise it should be a value type variable.
In that case, return the bound variable.
""" |
variable = self.__computation().resolve_variable(object_specifier)
if not variable:
return self.__context.resolve_object_specifier(object_specifier, secondary_specifier, property_name, objects_model)
elif variable.specifier is None:
return variable.bound_variable
return None |
<SYSTEM_TASK:>
Return the list of identifiers used in the expression.
<END_TASK>
<USER_TASK:>
Description:
def parse_names(cls, expression):
"""Return the list of identifiers used in the expression.""" |
names = set()
try:
ast_node = ast.parse(expression, "ast")
class Visitor(ast.NodeVisitor):
def visit_Name(self, node):
names.add(node.id)
Visitor().visit(ast_node)
except Exception:
pass
return names |
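For example, parsing a short expression collects every identifier it uses, including function names; this sketch assumes the enclosing class is named Computation so the classmethod can be called directly:
# Attribute names such as display_xdata are not Name nodes, so they are not collected.
names = Computation.parse_names("gaussian_blur(src.display_xdata, sigma)")
print(sorted(names))  # ['gaussian_blur', 'sigma', 'src']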
<SYSTEM_TASK:>
Bind a context to this computation.
<END_TASK>
<USER_TASK:>
Description:
def bind(self, context) -> None:
"""Bind a context to this computation.
The context allows the computation to convert object specifiers to actual objects.
""" |
# make a computation context based on the enclosing context.
self.__computation_context = ComputationContext(self, context)
# re-bind is not valid. be careful to set the computation after the data item is already in document.
for variable in self.variables:
assert variable.bound_item is None
for result in self.results:
assert result.bound_item is None
# bind the variables
for variable in self.variables:
self.__bind_variable(variable)
# bind the results
for result in self.results:
self.__bind_result(result) |
<SYSTEM_TASK:>
Unlisten and close each bound item.
<END_TASK>
<USER_TASK:>
Description:
def unbind(self):
"""Unlisten and close each bound item.""" |
for variable in self.variables:
self.__unbind_variable(variable)
for result in self.results:
self.__unbind_result(result) |
<SYSTEM_TASK:>
A sort key for the created field of a data item. The sort by uuid makes it deterministic.
<END_TASK>
<USER_TASK:>
Description:
def sort_by_date_key(data_item):
""" A sort key to for the created field of a data item. The sort by uuid makes it determinate. """ |
return data_item.title + str(data_item.uuid) if data_item.is_live else str(), data_item.date_for_sorting, str(data_item.uuid) |
<SYSTEM_TASK:>
Used to signal changes to the ref var, which are kept in document controller. ugh.
<END_TASK>
<USER_TASK:>
Description:
def set_r_value(self, r_var: str, *, notify_changed=True) -> None:
"""Used to signal changes to the ref var, which are kept in document controller. ugh.""" |
self.r_var = r_var
self._description_changed()
if notify_changed: # set to False to set the r-value at startup; avoid marking it as a change
self.__notify_description_changed() |
<SYSTEM_TASK:>
Sets the underlying data and data-metadata to the data_and_metadata.
<END_TASK>
<USER_TASK:>
Description:
def set_data_and_metadata(self, data_and_metadata, data_modified=None):
"""Sets the underlying data and data-metadata to the data_and_metadata.
Note: this does not make a copy of the data.
""" |
self.increment_data_ref_count()
try:
if data_and_metadata:
data = data_and_metadata.data
data_shape_and_dtype = data_and_metadata.data_shape_and_dtype
intensity_calibration = data_and_metadata.intensity_calibration
dimensional_calibrations = data_and_metadata.dimensional_calibrations
metadata = data_and_metadata.metadata
timestamp = data_and_metadata.timestamp
data_descriptor = data_and_metadata.data_descriptor
timezone = data_and_metadata.timezone or Utility.get_local_timezone()
timezone_offset = data_and_metadata.timezone_offset or Utility.TimezoneMinutesToStringConverter().convert(Utility.local_utcoffset_minutes())
new_data_and_metadata = DataAndMetadata.DataAndMetadata(self.__load_data, data_shape_and_dtype, intensity_calibration, dimensional_calibrations, metadata, timestamp, data, data_descriptor, timezone, timezone_offset)
else:
new_data_and_metadata = None
self.__set_data_metadata_direct(new_data_and_metadata, data_modified)
if self.__data_and_metadata is not None:
if self.persistent_object_context and not self.persistent_object_context.is_write_delayed(self):
self.persistent_object_context.write_external_data(self, "data", self.__data_and_metadata.data)
self.__data_and_metadata.unloadable = True
finally:
self.decrement_data_ref_count() |
<SYSTEM_TASK:>
Return the display values.
<END_TASK>
<USER_TASK:>
Description:
def get_calculated_display_values(self, immediate: bool=False) -> DisplayValues:
"""Return the display values.
Return the current (possibly uncalculated) display values unless 'immediate' is specified.
If 'immediate', return the existing (calculated) values if they exist. Using the 'immediate' values
avoids calculation except in cases where the display values haven't already been calculated.
""" |
if not immediate or not self.__is_master or not self.__last_display_values:
if not self.__current_display_values and self.__data_item:
self.__current_display_values = DisplayValues(self.__data_item.xdata, self.sequence_index, self.collection_index, self.slice_center, self.slice_width, self.display_limits, self.complex_display_type, self.__color_map_data)
def finalize(display_values):
self.__last_display_values = display_values
self.display_values_changed_event.fire()
self.__current_display_values.on_finalize = finalize
return self.__current_display_values
return self.__last_display_values |
<SYSTEM_TASK:>
Calculate best display limits and set them.
<END_TASK>
<USER_TASK:>
Description:
def auto_display_limits(self):
"""Calculate best display limits and set them.""" |
display_data_and_metadata = self.get_calculated_display_values(True).display_data_and_metadata
data = display_data_and_metadata.data if display_data_and_metadata else None
if data is not None:
# The old algorithm was a problem during EELS where the signal data
# is a small percentage of the overall data and was falling outside
# the included range. This is the new simplified algorithm. Future
# feature may allow user to select more complex algorithms.
mn, mx = numpy.nanmin(data), numpy.nanmax(data)
self.display_limits = mn, mx |
<SYSTEM_TASK:>
Remove a graphic, but do it through the container, so dependencies can be tracked.
<END_TASK>
<USER_TASK:>
Description:
def remove_graphic(self, graphic: Graphics.Graphic, *, safe: bool=False) -> typing.Optional[typing.Sequence]:
"""Remove a graphic, but do it through the container, so dependencies can be tracked.""" |
return self.remove_model_item(self, "graphics", graphic, safe=safe) |
<SYSTEM_TASK:>
Shape of the underlying data, if only one.
<END_TASK>
<USER_TASK:>
Description:
def dimensional_shape(self) -> typing.Optional[typing.Tuple[int, ...]]:
"""Shape of the underlying data, if only one.""" |
if not self.__data_and_metadata:
return None
return self.__data_and_metadata.dimensional_shape |
<SYSTEM_TASK:>
Writes a zip file local file header structure at the current file position.
<END_TASK>
<USER_TASK:>
Description:
def write_local_file(fp, name_bytes, writer, dt):
"""
Writes a zip file local file header structure at the current file position.
Returns data_len, crc32 for the data.
    :param fp: the file pointer to which to write the header
    :param name_bytes: the name of the file, encoded as bytes
:param writer: a function taking an fp parameter to do the writing, returns crc32
:param dt: the datetime to write to the archive
""" |
fp.write(struct.pack('I', 0x04034b50)) # local file header
fp.write(struct.pack('H', 10)) # extract version (default)
fp.write(struct.pack('H', 0)) # general purpose bits
fp.write(struct.pack('H', 0)) # compression method
msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second)
    fp.write(struct.pack('H', msdos_time)) # modification time
    fp.write(struct.pack('H', msdos_date)) # modification date
crc32_pos = fp.tell()
fp.write(struct.pack('I', 0)) # crc32 placeholder
data_len_pos = fp.tell()
fp.write(struct.pack('I', 0)) # compressed length placeholder
fp.write(struct.pack('I', 0)) # uncompressed length placeholder
fp.write(struct.pack('H', len(name_bytes))) # name length
fp.write(struct.pack('H', 0)) # extra length
fp.write(name_bytes)
data_start_pos = fp.tell()
crc32 = writer(fp)
data_end_pos = fp.tell()
data_len = data_end_pos - data_start_pos
fp.seek(crc32_pos)
fp.write(struct.pack('I', crc32)) # crc32
fp.seek(data_len_pos)
    fp.write(struct.pack('I', data_len)) # compressed length
    fp.write(struct.pack('I', data_len)) # uncompressed length
fp.seek(data_end_pos)
return data_len, crc32 |
<SYSTEM_TASK:>
Write a zip file directory entry at the current file position
<END_TASK>
<USER_TASK:>
Description:
def write_directory_data(fp, offset, name_bytes, data_len, crc32, dt):
"""
    Write a zip file directory entry at the current file position
    :param fp: the file pointer to which to write the header
    :param offset: the offset of the associated local file header
    :param name_bytes: the name of the file, encoded as bytes
:param data_len: the length of data that will be written to the archive
:param crc32: the crc32 of the data to be written
:param dt: the datetime to write to the archive
""" |
fp.write(struct.pack('I', 0x02014b50)) # central directory header
fp.write(struct.pack('H', 10)) # made by version (default)
fp.write(struct.pack('H', 10)) # extract version (default)
fp.write(struct.pack('H', 0)) # general purpose bits
fp.write(struct.pack('H', 0)) # compression method
msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second)
    fp.write(struct.pack('H', msdos_time)) # modification time
    fp.write(struct.pack('H', msdos_date)) # modification date
fp.write(struct.pack('I', crc32)) # crc32
fp.write(struct.pack('I', data_len)) # compressed length
fp.write(struct.pack('I', data_len)) # uncompressed length
fp.write(struct.pack('H', len(name_bytes))) # name length
fp.write(struct.pack('H', 0)) # extra length
fp.write(struct.pack('H', 0)) # comments length
fp.write(struct.pack('H', 0)) # disk number
fp.write(struct.pack('H', 0)) # internal file attributes
fp.write(struct.pack('I', 0)) # external file attributes
fp.write(struct.pack('I', offset)) # relative offset of file header
fp.write(name_bytes) |
<SYSTEM_TASK:>
Write zip file end of directory header at the current file position
<END_TASK>
<USER_TASK:>
Description:
def write_end_of_directory(fp, dir_size, dir_offset, count):
"""
Write zip file end of directory header at the current file position
    :param fp: the file pointer to which to write the header
:param dir_size: the total size of the directory
:param dir_offset: the start of the first directory header
:param count: the count of files
""" |
    fp.write(struct.pack('I', 0x06054b50)) # end of central directory signature
    fp.write(struct.pack('H', 0)) # number of this disk
    fp.write(struct.pack('H', 0)) # disk where central directory starts
    fp.write(struct.pack('H', count)) # number of files on this disk
    fp.write(struct.pack('H', count)) # total number of files
fp.write(struct.pack('I', dir_size)) # central directory size
fp.write(struct.pack('I', dir_offset)) # central directory offset
fp.write(struct.pack('H', 0)) |
<SYSTEM_TASK:>
Write custom zip file of data and properties to fp
<END_TASK>
<USER_TASK:>
Description:
def write_zip_fp(fp, data, properties, dir_data_list=None):
"""
Write custom zip file of data and properties to fp
    :param fp: the file pointer to which to write the header
:param data: the data to write to the file; may be None
:param properties: the properties to write to the file; may be None
:param dir_data_list: optional list of directory header information structures
If dir_data_list is specified, data should be None and properties should
be specified. Then the existing data structure will be left alone and only
the directory headers and end of directory header will be written.
Otherwise, if both data and properties are specified, both are written
out in full.
The properties param must not change during this method. Callers should
take care to ensure this does not happen.
""" |
assert data is not None or properties is not None
# dir_data_list has the format: local file record offset, name, data length, crc32
dir_data_list = list() if dir_data_list is None else dir_data_list
dt = datetime.datetime.now()
if data is not None:
offset_data = fp.tell()
def write_data(fp):
numpy_start_pos = fp.tell()
numpy.save(fp, data)
numpy_end_pos = fp.tell()
fp.seek(numpy_start_pos)
data_c = numpy.require(data, dtype=data.dtype, requirements=["C_CONTIGUOUS"])
header_data = fp.read((numpy_end_pos - numpy_start_pos) - data_c.nbytes) # read the header
data_crc32 = binascii.crc32(data_c.data, binascii.crc32(header_data)) & 0xFFFFFFFF
fp.seek(numpy_end_pos)
return data_crc32
data_len, crc32 = write_local_file(fp, b"data.npy", write_data, dt)
dir_data_list.append((offset_data, b"data.npy", data_len, crc32))
if properties is not None:
json_str = str()
try:
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Geometry.IntPoint) or isinstance(obj, Geometry.IntSize) or isinstance(obj, Geometry.IntRect) or isinstance(obj, Geometry.FloatPoint) or isinstance(obj, Geometry.FloatSize) or isinstance(obj, Geometry.FloatRect):
return tuple(obj)
else:
return json.JSONEncoder.default(self, obj)
json_io = io.StringIO()
json.dump(properties, json_io, cls=JSONEncoder)
json_str = json_io.getvalue()
except Exception as e:
# catch exceptions to avoid corrupt zip files
import traceback
logging.error("Exception writing zip file %s" + str(e))
traceback.print_exc()
traceback.print_stack()
def write_json(fp):
json_bytes = bytes(json_str, 'ISO-8859-1')
fp.write(json_bytes)
return binascii.crc32(json_bytes) & 0xFFFFFFFF
offset_json = fp.tell()
json_len, json_crc32 = write_local_file(fp, b"metadata.json", write_json, dt)
dir_data_list.append((offset_json, b"metadata.json", json_len, json_crc32))
dir_offset = fp.tell()
for offset, name_bytes, data_len, crc32 in dir_data_list:
write_directory_data(fp, offset, name_bytes, data_len, crc32, dt)
dir_size = fp.tell() - dir_offset
write_end_of_directory(fp, dir_size, dir_offset, len(dir_data_list))
fp.truncate() |
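A sketch of writing a small array and a properties dictionary into an in-memory archive with this function; the array contents and property values are arbitrary, and the result should be readable by the standard-library zipfile module since the entries are stored uncompressed:
import io
import zipfile
import numpy
data = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
properties = {"title": "example", "version": 1}
buf = io.BytesIO()
write_zip_fp(buf, data, properties)
with zipfile.ZipFile(buf) as zf:
    print(zf.namelist())  # ['data.npy', 'metadata.json']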
<SYSTEM_TASK:>
Write custom zip file to the file path
<END_TASK>
<USER_TASK:>
Description:
def write_zip(file_path, data, properties):
"""
Write custom zip file to the file path
:param file_path: the file to which to write the zip file
:param data: the data to write to the file; may be None
:param properties: the properties to write to the file; may be None
The properties param must not change during this method. Callers should
take care to ensure this does not happen.
See write_zip_fp.
""" |
with open(file_path, "w+b") as fp:
write_zip_fp(fp, data, properties) |
<SYSTEM_TASK:>
Parse the zip file headers at fp
<END_TASK>
<USER_TASK:>
Description:
def parse_zip(fp):
"""
Parse the zip file headers at fp
:param fp: the file pointer from which to parse the zip file
:return: A tuple of local files, directory headers, and end of central directory
The local files are dictionary where the keys are the local file offset and the
values are each a tuple consisting of the name, data position, data length, and crc32.
The directory headers are a dictionary where the keys are the names of the files
and the values are a tuple consisting of the directory header position, and the
associated local file position.
The end of central directory is a tuple consisting of the location of the end of
central directory header and the location of the first directory header.
This method will seek to location 0 of fp and leave fp at end of file.
""" |
local_files = {}
dir_files = {}
eocd = None
fp.seek(0)
while True:
pos = fp.tell()
signature = struct.unpack('I', fp.read(4))[0]
if signature == 0x04034b50:
fp.seek(pos + 14)
crc32 = struct.unpack('I', fp.read(4))[0]
fp.seek(pos + 18)
data_len = struct.unpack('I', fp.read(4))[0]
fp.seek(pos + 26)
name_len = struct.unpack('H', fp.read(2))[0]
extra_len = struct.unpack('H', fp.read(2))[0]
name_bytes = fp.read(name_len)
fp.seek(extra_len, os.SEEK_CUR)
data_pos = fp.tell()
fp.seek(data_len, os.SEEK_CUR)
local_files[pos] = (name_bytes, data_pos, data_len, crc32)
elif signature == 0x02014b50:
fp.seek(pos + 28)
name_len = struct.unpack('H', fp.read(2))[0]
extra_len = struct.unpack('H', fp.read(2))[0]
comment_len = struct.unpack('H', fp.read(2))[0]
fp.seek(pos + 42)
pos2 = struct.unpack('I', fp.read(4))[0]
name_bytes = fp.read(name_len)
fp.seek(pos + 46 + name_len + extra_len + comment_len)
dir_files[name_bytes] = (pos, pos2)
elif signature == 0x06054b50:
fp.seek(pos + 16)
pos2 = struct.unpack('I', fp.read(4))[0]
eocd = (pos, pos2)
break
else:
raise IOError()
return local_files, dir_files, eocd |
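Mirroring the write sketch above, the same kind of in-memory archive can be parsed back and the CRC of the data payload re-checked; the file name and contents are again arbitrary:
import binascii
import io
import numpy
buf = io.BytesIO()
write_zip_fp(buf, numpy.arange(12, dtype=numpy.float32).reshape(3, 4), {"title": "example"})
local_files, dir_files, eocd = parse_zip(buf)
# dir_files maps name bytes to (directory header offset, local file header offset).
local_offset = dir_files[b"data.npy"][1]
name_bytes, data_pos, data_len, crc32 = local_files[local_offset]
buf.seek(data_pos)
assert binascii.crc32(buf.read(data_len)) & 0xFFFFFFFF == crc32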