# # add data for stats
# efficiency_data_mouse.append(efficiency[trial_num])
# # duration_data_mouse.append(end_idx[trial_num]) #TEMP COMMENTING
# duration_data_mouse.append(RT)
# prev_homings_data_mouse.append(num_prev_homings_EV[trial_num])
#
# t += 1
#
# # append data for stats
# if efficiency_data_mouse:
# efficiency_data_all.append(efficiency_data_mouse)
# duration_data_all.append(duration_data_mouse)
# prev_homings_data_all.append(prev_homings_data_mouse)
# all_conditions.append(data_condition[c])
# mouse_ID.append(m);
# m += 1
#
# # format end ind
# # end_idx = np.array([e/30 for e in end_idx])
# end_idx[np.isnan(efficiency)] = np.nan
# # loop over data to plot
# for i, (data, data_label) in enumerate(zip([efficiency_RT, end_idx, RT_all, avg_speed, edginess],
# ['Efficiency'])): # , 'Duration', 'Reaction Time', 'Speed', 'Trajectory'])): #edginess, 'Trajectory',
# # for i, (data, data_label) in enumerate(zip([edginess], ['Trajectory'])): # edginess, 'Trajectory',
#
# # for i, (data, data_label) in enumerate(zip([edginess, efficiency, end_idx], ['Trajectory', 'Efficiency', 'Duration'])):
# # for x_data, x_data_label in zip([num_prev_homings], ['Prior homings']):
# plot_data = data[~np.isnan(data)]
#
# # for x_data, x_data_label in zip([trials, time, num_prev_homings_EV, num_prev_homings_HV, prev_edginess, time_exploring, distance_exploring, time_exploring_far, time_exploring_obstacle],
# # ['Trials', 'Time', 'Edge vector homings', 'Homing vector homings', 'Mean prior trajectory','Time exploring', 'Distance explored', 'Time exploring far side', 'Time exploring obstacle']):
#
# for x_data, x_data_label in zip([trials, time_exploring], ['trial number']): # , 'Time exploring']):
#
# print('\nCorrelation between ' + data_label + ' and ' + x_data_label)
#
# # only plot escapes
# data_for_box_plot = data[~np.isnan(data)]
# print(len(data_for_box_plot))
# x_data = x_data[~np.isnan(data)]
#
# # get the correlation
# r, p = scipy.stats.pearsonr(x_data, data_for_box_plot)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
#
# # initialize figure
# plt.title(data_label + ' x ' + x_data_label)
# # set up the figure
# # if data_label=='Efficiency': ax1.set_ylim([-.03, 1.03])
# # elif data_label=='Duration': ax1.set_ylim([-.1, 7])
#
# if np.max(x_data) < 5:
# ax1.set_xticks(np.unique(x_data).astype(int))
# else:
# ax1.set_xticks(np.arange(5, 25, 5))
# # ax1.set_xlim([5,20])
#
# # jitter the axis
# scatter_axis = scatter_the_axis_efficiency(plot_data, x_data + c/3 - .2)
# # plot each trial
# ax1.scatter(scatter_axis, plot_data, color=colors[c], s=15, alpha=1, edgecolor=colors[c], linewidth=1)
#
# for x in np.unique(x_data):
# # plot kde
# kde = fit_kde(plot_data[x_data==x], bw=.02) #.2) # .04
# plot_kde(ax1, kde, plot_data[x_data==x], z=x + c/3 - .15, vertical=True, normto=.15, color=kde_colors[c], violin=False, clip=True)
#
# # box and whisker
# bp = ax1.boxplot([plot_data[x_data==x], [0, 0]], positions=[x + c / 3 - .2, -10], showfliers=False, widths = [0.05, .05], zorder=99)
# plt.setp(bp['boxes'], color=[.5, .5, .5], linewidth=2)
# plt.setp(bp['whiskers'], color=[.5, .5, .5], linewidth=2)
# plt.setp(bp['medians'], linewidth=2)
# ax1.set_xlim(.25, 3.75)
# ax1.set_ylim(.5, 1.05)
# # ax1.set_ylim(.95, 1.9)
# ax1.set_xticks([1,2,3])
# ax1.set_xticklabels([1,2,3])
#
#
#
# # # for each trial
# # for x in np.unique(x_data):
# # # plot kde
# # kde = fit_kde(plot_data[x_data>=0], bw=.02) #.2) # .04
# # plot_kde(ax1, kde, plot_data[x_data>=0], z=x + c/3 - .15, vertical=True, normto=.15, color=kde_colors[c], violin=False, clip=True)
# #
# # # box and whisker
# # bp = ax1.boxplot([plot_data[x_data>=0], [0, 0]], positions=[x + c / 3 - .2, -10], showfliers=False, widths = [0.05, .05], zorder=99)
# # plt.setp(bp['boxes'], color=[.5, .5, .5], linewidth=2)
# # plt.setp(bp['whiskers'], color=[.5, .5, .5], linewidth=2)
# # plt.setp(bp['medians'], linewidth=2)
# # ax1.set_xlim(.25, 3.75)
# # ax1.set_ylim(.5, 1.05)
# # # ax1.set_ylim(.95, 1.9)
# # ax1.set_xticks([1,2,3])
# # ax1.set_xticklabels([1,2,3])
# #
# # # jitter the axis
# # scatter_axis = scatter_the_axis_efficiency(plot_data, np.ones_like(plot_data) * (x + c/3 - .2))
# # # plot each trial
# # ax1.scatter(scatter_axis, plot_data, color=colors[c], s=15, alpha=1, edgecolor=colors[c], linewidth=1)
#
#
#
# ax1.plot([-1, 4], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
# # save the plot
# plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.png'), format='png')
# plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
#
# plt.show()
# print('done')
#
#
#
def plot_efficiency(self):
# initialize parameters
fps = 30
traj_loc = 40
escape_duration = 12 #12 #6
HV_cutoff = .681
ETD = 10
# ax2, fig2, ax3, fig3 = initialize_figures_efficiency(self)
efficiency_data = [[],[],[],[]]
duration_data = [[],[],[],[]]
# initialize arrays for stats
efficiency_data_all = []
duration_data_all = []
prev_homings_data_all = []
all_conditions = []
mouse_ID = []; m = 1
# data_condition = ['naive','experienced']
data_condition = ['escape', 'food']
# data_condition = ['OR - EV', 'OR - HV', 'OF']
# data_condition = ['Obstacle removed (no shelter)', 'obstacle removed', 'acute OR', 'obstacle']
colors = [[0,0,0],[1,0,0]]
#
plot_stuff = True
do_traversals = False
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
print(' - - - -- - - - -- - - - - - - -- - - - - - - - - -')
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, num_prev_homings_front_EV, num_prev_homings_other_EV, num_prev_homings_HV, time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre,\
time_exploring_obstacle_post,time_exploring_far_pre,time_exploring_far_post, time_exploring_edge, time_exploring_other_edge, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment:
escape_duration = 12
if 'food' in experiment: escape_duration = 9
# else:escape_duration = 9
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['start time']):
print(mouse)
# initialize arrays for stats
efficiency_data_mouse = []
duration_data_mouse = []
prev_homings_data_mouse = []
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
trial_num += 1
if t > 2 and not 'food' in experiment and not 'void' in experiment and not 'dark' in experiment: continue
if 'food' in experiment and condition == 'no obstacle' and self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
if t > 8: continue
# if t > 2: continue
# if 'on off' in experiment and trial: continue
# print(t)
# impose conditions
end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
if (end_idx[trial_num] > escape_duration * fps) or np.isnan(end_idx[trial_num]): continue
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue #25
# get prev edginess
_, _ = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
# only do predict edgy:
# if c == 0:
# if prev_edginess[trial_num] <= HV_cutoff and 'down' in experiment: continue
# elif c == 1:
# if prev_edginess[trial_num] > HV_cutoff and 'down' in experiment: continue
# add data
fill_in_trial_data_efficiency(ETD, condition, efficiency, efficiency_RT, experiment, mouse, num_prev_homings_EV,num_prev_homings_front_EV, num_prev_homings_other_EV,num_prev_homings_HV,
time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre,
time_exploring_obstacle_post, time_exploring_far_pre, time_exploring_far_post, time_exploring_edge, time_exploring_other_edge,
self, time, trial, trial_num, trials, edginess, t)
# if edginess[trial_num] < HV_cutoff: continue
if do_traversals:
traversal = self.analysis[experiment][condition]['back traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
x_edge = self.analysis[experiment][condition]['x edge'][mouse][trial]
# if x_edge==25: x_edge = 75
# else: x_edge = 25
spont_edge = []
for trav in traversal[0 * 5 + 0]:
spont_edge.append(trav[0][-1]*scaling_factor)
esc_edge = []
for trav in traversal[1 * 5 + 0]:
esc_edge.append(trav[0][-1]*scaling_factor)
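# NOTE (editorial, inferred from the indexing below -- an assumption, not documented
# in the original source): 'traversal' appears to pack five fields per traversal type,
# so traversal[type*5 + 0] holds the paths, +1 the start frames, +2 the edginess
# scores, and +3 the durations, with type 0 = spontaneous and type 1 = escape traversals.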
num_prev_homings_EV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial]-(15+20*('void' in experiment))) * 30 * 60)) + \
np.sum((np.array(traversal[1 * 5 + 3]) < 1.5) * (abs(np.array(esc_edge)-x_edge) < 25) * \
(np.array(traversal[1 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - (15+20*('void' in experiment))) * 30 * 60))
num_prev_homings_HV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) < HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial]-(15+20*('void' in experiment))) * 30 * 60)) + \
np.sum((np.array(traversal[1 * 5 + 3]) < 1.5) * (abs(np.array(esc_edge)-x_edge) < 25) * \
(np.array(traversal[1 * 5 + 2]) < HV_cutoff) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - (15+20*('void' in experiment))) * 30 * 60))
eligible_homings = ~((np.array(traversal[0 * 5 + 2]) > HV_cutoff) * (abs(np.array(spont_edge)-x_edge) > 40)) * (np.array(traversal[0 * 5 + 3]) < 3) * \
(np.array(traversal[0 * 5 + 1]) <
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for converting between performance input and model input/output."""
from __future__ import division
import math
# internal imports
import numpy as np
from magenta.music import encoder_decoder
from magenta.music import performance_lib
from magenta.music.encoder_decoder import EventSequenceEncoderDecoder
from magenta.music.performance_lib import PerformanceEvent
# Number of floats used to encode NOTE_ON and NOTE_OFF events, using modulo-12
# encoding. 5 floats for: valid, octave_cos, octave_sin, note_cos, note_sin.
MODULO_PITCH_ENCODER_WIDTH = 5
# Number of floats used to encode TIME_SHIFT and VELOCITY events using
# modulo-bins encoding. 3 floats for: valid, event_cos, event_sin.
MODULO_VELOCITY_ENCODER_WIDTH = 3
MODULO_TIME_SHIFT_ENCODER_WIDTH = 3
MODULO_EVENT_RANGES = [
(PerformanceEvent.NOTE_ON, performance_lib.MIN_MIDI_PITCH,
performance_lib.MAX_MIDI_PITCH, MODULO_PITCH_ENCODER_WIDTH),
(PerformanceEvent.NOTE_OFF, performance_lib.MIN_MIDI_PITCH,
performance_lib.MAX_MIDI_PITCH, MODULO_PITCH_ENCODER_WIDTH),
]
class PerformanceModuloEncoding(object):
"""Modulo encoding for performance events."""
def __init__(self, num_velocity_bins=0,
max_shift_steps=performance_lib.DEFAULT_MAX_SHIFT_STEPS):
"""Initiaizer for PerformanceModuloEncoding.
Args:
num_velocity_bins: Number of velocity bins.
max_shift_steps: Maximum number of shift steps supported.
"""
self._event_ranges = MODULO_EVENT_RANGES + [
(PerformanceEvent.TIME_SHIFT, 1, max_shift_steps,
MODULO_TIME_SHIFT_ENCODER_WIDTH)
]
if num_velocity_bins > 0:
self._event_ranges.append(
(PerformanceEvent.VELOCITY, 1, num_velocity_bins,
MODULO_VELOCITY_ENCODER_WIDTH))
self._max_shift_steps = max_shift_steps
self._num_velocity_bins = num_velocity_bins
# Create a lookup table for modulo-12 encoding of pitch classes.
# Possible values for semitone_steps are 1 and 7. A value of 1 corresponds
# to placing notes consecutively on the unit circle. A value of 7
# corresponds to following each note with one that is 7 semitones above it.
# semitone_steps = 1 seems to produce better results, and is the recommended
# value. Moreover, unit tests are provided only for semitone_steps = 1. If
# in the future you plan to enable support for semitone_steps = 7, then
# please make semitone_steps a parameter of this method, and add unit tests
# for it.
semitone_steps = 1
self._pitch_class_table = np.zeros((12, 2))
for i in range(12):
row = (i * semitone_steps) % 12
angle = (float(row) * math.pi) / 6.0
self._pitch_class_table[row] = [math.cos(angle), math.sin(angle)]
# Create a lookup table for modulo-144 encoding of notes. Encode each note
# on a unit circle of 144 notes, spanning 12 octaves. Since there are only
# 128 midi notes, the last 16 positions on the unit circle will not be used.
self._note_table = np.zeros((144, 2))
for i in range(144):
angle = (float(i) * math.pi) / 72.0
self._note_table[i] = [math.cos(angle), math.sin(angle)]
# Create a lookup table for modulo-bins encoding of time_shifts.
self._time_shift_table = np.zeros((max_shift_steps, 2))
for i in range(max_shift_steps):
angle = (float(i) * 2.0 * math.pi) / float(max_shift_steps)
self._time_shift_table[i] = [math.cos(angle), math.sin(angle)]
# Create a lookup table for modulo-bins encoding of velocities.
if num_velocity_bins > 0:
self._velocity_table = np.zeros((num_velocity_bins, 2))
for i in range(num_velocity_bins):
angle = (float(i) * 2.0 * math.pi) / float(num_velocity_bins)
self._velocity_table[i] = [math.cos(angle), math.sin(angle)]
@property
def input_size(self):
total = 0
for _, _, _, encoder_width in self._event_ranges:
total += encoder_width
return total
def encode_modulo_event(self, event):
offset = 0
for event_type, min_value, _, encoder_width in self._event_ranges:
if event.event_type == event_type:
value = event.event_value - min_value
return offset, event_type, value
offset += encoder_width
raise ValueError('Unknown event type: %s' % event.event_type)
def embed_pitch_class(self, value):
if value < 0 or value >= 12:
raise ValueError('Unexpected pitch class value: %s' % value)
return self._pitch_class_table[value]
def embed_note(self, value):
if value < 0 or value >= 144:
raise ValueError('Unexpected note value: %s' % value)
return self._note_table[value]
def embed_time_shift(self, value):
if value < 0 or value >= self._max_shift_steps:
raise ValueError('Unexpected time shift value: %s' % value)
return self._time_shift_table[value]
def embed_velocity(self, value):
if value < 0 or value >= self._num_velocity_bins:
raise ValueError('Unexpected velocity value: %s' % value)
return self._velocity_table[value]
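# Illustrative usage sketch (editorial addition, not part of the original module;
# the widths follow the MODULO_*_ENCODER_WIDTH constants above):
#
#   encoding = PerformanceModuloEncoding(num_velocity_bins=32)
#   encoding.input_size
#   # -> 5 (NOTE_ON) + 5 (NOTE_OFF) + 3 (TIME_SHIFT) + 3 (VELOCITY) = 16
#   encoding.embed_pitch_class(0)
#   # -> array([1., 0.]), i.e. (cos 0, sin 0) on the unit circle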
class ModuloPerformanceEventSequenceEncoderDecoder(EventSequenceEncoderDecoder):
"""An EventSequenceEncoderDecoder for modulo encoding performance events.
ModuloPerformanceEventSequenceEncoderDecoder is an EventSequenceEncoderDecoder
that uses modulo/circular encoding for encoding performance input events, and
otherwise uses one hot encoding for encoding and decoding of labels.
"""
def __init__(self, num_velocity_bins=0,
max_shift_steps=performance_lib.DEFAULT_MAX_SHIFT_STEPS):
"""Initialize a ModuloPerformanceEventSequenceEncoderDecoder object.
Args:
num_velocity_bins: Number of velocity bins.
max_shift_steps: Maximum number of shift steps supported.
"""
self._modulo_encoding = PerformanceModuloEncoding(
num_velocity_bins=num_velocity_bins, max_shift_steps=max_shift_steps)
self._one_hot_encoding = PerformanceOneHotEncoding(
num_velocity_bins=num_velocity_bins, max_shift_steps=max_shift_steps)
@property
def input_size(self):
return self._modulo_encoding.input_size
@property
def num_classes(self):
return self._one_hot_encoding.num_classes
@property
def default_event_label(self):
return self._one_hot_encoding.encode_event(
self._one_hot_encoding.default_event)
def events_to_input(self, events, position):
"""Returns the input vector for the given position in the event sequence.
Returns a modulo/circular encoding for the given position in the performance
event sequence.
Args:
events: A list-like sequence of events.
position: An integer event position in the event sequence.
Returns:
An input vector, a list of floats.
"""
input_ = [0.0] * self.input_size
offset, event_type, value = (self._modulo_encoding
.encode_modulo_event(events[position]))
input_[offset] = 1.0 # valid bit for the event
offset += 1
if (event_type == performance_lib.PerformanceEvent.NOTE_ON or
event_type == performance_lib.PerformanceEvent.NOTE_OFF):
# Encode the note on a circle of 144 notes, covering 12 octaves.
cosine_sine_pair = self._modulo_encoding.embed_note(value)
input_[offset] = cosine_sine_pair[0]
input_[offset + 1] = cosine_sine_pair[1]
offset += 2
# Encode the note's pitch class, using the encoder's lookup table.
value %= 12
cosine_sine_pair = self._modulo_encoding.embed_pitch_class(value)
input_[offset] = cosine_sine_pair[0]
input_[offset + 1] = cosine_sine_pair[1]
else:
# This must be a velocity, or a time-shift event. Encode it using
# modulo-bins embedding.
if event_type == performance_lib.PerformanceEvent.TIME_SHIFT:
cosine_sine_pair = self._modulo_encoding.embed_time_shift(value)
else:
cosine_sine_pair = self._modulo_encoding.embed_velocity(value)
input_[offset] = cosine_sine_pair[0]
input_[offset + 1] = cosine_sine_pair[1]
return input_
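# For example (editorial sketch, not from the original source; assumes
# performance_lib.MIN_MIDI_PITCH == 0): a NOTE_ON at MIDI pitch 60 sets the valid
# bit at the NOTE_ON offset, writes (cos, sin) of position 60 on the 144-note
# circle, then (cos, sin) of pitch class 60 % 12 == 0, i.e. (1.0, 0.0); every
# other slot of input_ stays 0.0.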
def events_to_label(self, events, position):
"""Returns the label for the given position in the event sequence.
Returns the zero-based index value for the given position in the event
sequence, as determined by the one hot encoding.
Args:
events: A list-like sequence of events.
position: An integer event position in the event sequence.
Returns:
A label, an integer.
"""
return self._one_hot_encoding.encode_event(events[position])
def class_index_to_event(self, class_index, events):
"""Returns the event for the given class index.
This is the reverse process of the self.events_to_label method.
Args:
class_index: An integer in the range [0, self.num_classes).
events: A list-like sequence of events. This object is not used in this
implementation.
Returns:
An event value.
"""
return self._one_hot_encoding.decode_event(class_index)
def labels_to_num_steps(self, labels):
"""Returns the total number of time steps for a sequence of class labels.
Args:
labels: A list-like sequence of integers in the range
[0, self.num_classes).
Returns:
The total number of time steps for the label sequence, as determined by
the one-hot encoding.
"""
events = []
for label in labels:
events.append(self.class_index_to_event(label, events))
return sum(self._one_hot_encoding.event_to_num_steps(event)
for event in events)
class PerformanceOneHotEncoding(encoder_decoder.OneHotEncoding):
"""One-hot encoding for performance events."""
def __init__(self, num_velocity_bins=0,
max_shift_steps=performance_lib.DEFAULT_MAX_SHIFT_STEPS,
min_pitch=performance_lib.MIN_MIDI_PITCH,
max_pitch=performance_lib.MAX_MIDI_PITCH):
self._event_ranges = [
(PerformanceEvent.NOTE_ON, min_pitch, max_pitch),
(PerformanceEvent.NOTE_OFF, min_pitch, max_pitch),
(PerformanceEvent.TIME_SHIFT, 1, max_shift_steps)
]
if num_velocity_bins > 0:
self._event_ranges.append(
(PerformanceEvent.VELOCITY, 1, num_velocity_bins))
self._max_shift_steps = max_shift_steps
@property
def num_classes(self):
return sum(max_value - min_value + 1
for event_type, min_value, max_value in self._event_ranges)
@property
def default_event(self):
return PerformanceEvent(
event_type=PerformanceEvent.TIME_SHIFT,
event_value=self._max_shift_steps)
def encode_event(self, event):
offset = 0
for event_type, min_value, max_value in self._event_ranges:
if event.event_type == event_type:
return offset + event.event_value - min_value
offset += max_value - min_value + 1
raise ValueError('Unknown event type: %s' % event.event_type)
def decode_event(self, index):
offset = 0
for event_type, min_value, max_value in self._event_ranges:
if offset <= index <= offset + max_value - min_value:
return PerformanceEvent(
event_type=event_type, event_value=min_value + index - offset)
offset += max_value - min_value + 1
raise ValueError('Unknown event index: %s' % index)
def event_to_num_steps(self, event):
if event.event_type == PerformanceEvent.TIME_SHIFT:
return event.event_value
else:
return 0
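# Illustrative encoding example (editorial addition, not from the original source),
# assuming min_pitch=0 and max_pitch=127: a NOTE_ON at pitch 60 maps to class index
# 60, a NOTE_OFF at pitch 60 maps to 128 + 60 = 188, and a TIME_SHIFT of 10 steps
# maps to 256 + (10 - 1) = 265; decode_event inverts this mapping, and
# event_to_num_steps is nonzero only for TIME_SHIFT events.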
class NotePerformanceEventSequenceEncoderDecoder(
EventSequenceEncoderDecoder):
"""Multiple one-hot encoding for event tuples."""
def __init__(self, num_velocity_bins, max_shift_steps=1000,
max_duration_steps=1000,
min_pitch=performance_lib.MIN_MIDI_PITCH,
max_pitch=performance_lib.MAX_MIDI_PITCH):
self._min_pitch = min_pitch
def optimal_num_segments(steps):
segments_indices = [(i, i + steps / i) for i in range(1, steps)
if steps % i == 0]
return min(segments_indices, key=lambda v: v[1])[0]
# Add 1 because we need to represent 0 time shifts.
self._shift_steps_segments = optimal_num_segments(max_shift_steps + 1)
assert self._shift_steps_segments > 1
self._shift_steps_per_segment = (
(max_shift_steps + 1) // self._shift_steps_segments)
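# Worked example (editorial addition, not in the original source): with the default
# max_shift_steps=1000, optimal_num_segments(1001) scans the divisors of 1001
# (1, 7, 11, 13, 77, 91, 143) and picks the i minimizing i + 1001/i, which is 13
# (13 + 77 = 90), so shift steps are split into 13 segments of 1001 // 13 = 77 each.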
self._max_duration_steps = max_duration_steps
self._duration_steps_segments =
"""Calculate RSA neurometrics.
- import subj/stream wise rdm_times arrays
- for each subj/stream, average over time window: 9x9 RDM
- create an array of numberline RDMs with different parameters each: kappa, bias
- for each subj/stream/meantime RDM, correlate with all model RDMs --> grid
- plot mean over grids for each stream
- plot individual grid maxima
- plot mean grid maximum
- make maps relative to "linear" map: all models that lead to worse correlation
are coded "<= 0" (minimum color)
- calculate t values over participant correlations: ttest or wilcoxon
"""
# %%
# Imports
import itertools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pingouin
import scipy.stats
import statsmodels.stats.multitest
from scipy.spatial.distance import squareform
from scipy.stats import sem
from tqdm.auto import tqdm
from config import ANALYSIS_DIR_LOCAL, DATA_DIR_EXTERNAL, STREAMS, SUBJS
from model_rdms import get_models_dict
# %%
# Settings
# Select the data source and analysis directory here
data_dir = DATA_DIR_EXTERNAL
analysis_dir = ANALYSIS_DIR_LOCAL
grid_res = 101
opt = 4
if opt == 0:
kappas = np.linspace(0.4, 4.0, grid_res)
biases = np.linspace(-1.0, 1.0, grid_res)
elif opt == 1:
kappas = np.linspace(0.5, 10.0, grid_res)
biases = np.linspace(-0.75, 0.75, grid_res)
elif opt == 2:
kappas = np.linspace(0.5, 10.0, grid_res)
biases = np.linspace(-0.5, 0.5, grid_res)
elif opt == 3:
grid_res = 131
kappas = np.linspace(0.0, 6.5, grid_res)
biases = np.linspace(-0.5, 0.5, grid_res)
elif opt == 4:
grid_res = 131
kappas = np.exp(np.linspace(-2, 2, grid_res))
biases = np.linspace(-0.5, 0.5, grid_res)
else:
raise RuntimeError(f"unknown 'opt': {opt}")
bias_kappa_combis = list(itertools.product(biases, kappas))
idx_bias_zero = (np.abs(biases - 0.0)).argmin()
idx_kappa_one = (np.abs(kappas - 1.0)).argmin()
window_sel = (0.2, 0.6) # representative window, look at RSA timecourse figure
pthresh = 0.05
subtract_maps = True
rdm_size = "18x18"
ndim = int(rdm_size.split("x")[0])
# "Pearson' r", "Kendall's tau-b", "Spearman's rho"
corr_method = "Pearson's r"
# whether or not to orthogonalize the model RDMs
orth = True
# which models to orthogonalize "numberline" with?
modelnames = ["digit", "color", "numberline"]
if rdm_size == "9x9":
modelnames = ["numberline"]
orth = False
print("For 9x9 neurometrics, always run without orth and only numberline.")
# overwrite for saving?
overwrite = False
# %%
# Prepare file paths
derivatives = data_dir / "derivatives"
mahal_dir = data_dir / "derivatives" / "rsa" / rdm_size / "rdms_mahalanobis"
fname_rdm_template = str(mahal_dir / "sub-{:02}_stream-{}_rdm-mahal.npy")
fname_times = mahal_dir / "times.npy"
fname_params = analysis_dir / "derived_data" / "neurometrics_params.tsv"
fname_grids = analysis_dir / "derived_data" / "neurometrics_grids.npy"
fname_scatters = analysis_dir / "derived_data" / "neurometrics_scatters.npy"
fname_bs_ks = analysis_dir / "derived_data" / "neurometrics_bs_ks.npy"
# %%
# save biases, kappas
np.save(fname_bs_ks, np.stack([biases, kappas]))
# %%
# Get times for RDM timecourses
times = np.load(fname_times)
idx_start = np.nonzero(times == window_sel[0])[0][0]
idx_stop = np.nonzero(times == window_sel[1])[0][0]
# %%
# Load all rdm_times and form mean
rdm_mean_streams_subjs = np.full((ndim, ndim, len(STREAMS), len(SUBJS)), np.nan)
for isub, sub in enumerate(tqdm(SUBJS)):
for istream, stream in enumerate(STREAMS):
rdm_times = np.load(fname_rdm_template.format(sub, stream))
rdm_mean = np.mean(rdm_times[..., idx_start:idx_stop], axis=-1)
rdm_mean_streams_subjs[..., istream, isub] = rdm_mean
# %%
# Calculate model RDMs
key = "orth" if orth else "no_orth"
model_rdms = np.full((ndim, ndim, len(bias_kappa_combis)), np.nan)
for i, (bias, kappa) in enumerate(tqdm(bias_kappa_combis)):
models_dict = get_models_dict(rdm_size, modelnames, orth, bias=bias, kappa=kappa)
dv_rdm = models_dict[key]["numberline"]
model_rdms[..., i] = dv_rdm
assert not np.isnan(model_rdms).any()
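# Shape sketch (editorial addition, not in the original script): with the default
# rdm_size of 18x18 and opt=4, the grid is 131 x 131, so there are 131 * 131 = 17161
# (bias, kappa) model RDMs, and squareform() below condenses each 18x18 RDM into a
# vector of 18 * 17 / 2 = 153 off-diagonal values.
assert model_rdms.shape == (ndim, ndim, len(bias_kappa_combis))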
# %%
# Correlate ERP-RDMs and models --> one grid per subj/stream
grid_streams_subjs = np.full(
(len(kappas), len(biases), len(STREAMS), len(SUBJS)), np.nan
)
for isub, sub in enumerate(tqdm(SUBJS)):
for istream, stream in enumerate(STREAMS):
# Get ERP RDM
rdm_mean = rdm_mean_streams_subjs[..., istream, isub]
rdm_mean_vec = squareform(rdm_mean)
for icombi, (bias, kappa) in enumerate(bias_kappa_combis):
rdm_model = model_rdms[..., icombi]
rdm_model_vec = squareform(rdm_model)
if corr_method == "Pearson's r":
corr, _ = scipy.stats.pearsonr(rdm_mean_vec, rdm_model_vec)
elif corr_method == "Kendall's tau-b":
corr, _ = scipy.stats.kendalltau(rdm_mean_vec, rdm_model_vec)
else:
assert corr_method == "Spearman's rho"
corr, _ = scipy.stats.spearmanr(rdm_mean_vec, rdm_model_vec)
idx_bias = np.nonzero(biases == bias)[0][0]
idx_kappa = np.nonzero(kappas == kappa)[0][0]
grid_streams_subjs[idx_kappa, idx_bias, istream, isub] = corr
# %%
# Normalize maps to be relative to bias=0, kappa=1
if subtract_maps:
rng = np.random.default_rng(42)
for isub, sub in enumerate(tqdm(SUBJS)):
for istream, stream in enumerate(STREAMS):
corr_ref = grid_streams_subjs[idx_kappa_one, idx_bias_zero, istream, isub]
grid_streams_subjs[..., istream, isub] -= corr_ref
# subtracting the value at k=1, b=0 from the map will make the k=1 row 0.
# This is because when k=1, different biases will not result in different
# numdist RDMs.
# Solution: Add tiny amount of random noise to that row,
# so that down-the-line tests don't run into NaN problems
noise = rng.normal(size=grid_res) * 1e-8
grid_streams_subjs[idx_kappa_one, :, istream, isub] += noise
# %%
# Calculate 1 samp t-tests against 0 for each cell to test significance
pval_maps_streams = np.full((len(kappas), len(biases), len(STREAMS)), np.nan)
for istream, stream in enumerate(STREAMS):
data = grid_streams_subjs[..., istream, :]
_, pvals = scipy.stats.ttest_1samp(a=data, popmean=0, axis=-1, nan_policy="raise")
pval_maps_streams[..., istream] = pvals
assert not np.isnan(pvals).any()
# %%
# Create a mask for plotting the significant values in grid
# use B/H FDR correction
alpha_val_mask = 0.75
sig_masks_streams = np.full_like(pval_maps_streams, np.nan)
corrected_pval_maps_streams = np.full_like(pval_maps_streams, np.nan)
for istream, stream in enumerate(STREAMS):
pvals = pval_maps_streams[..., istream]
sig, corrected = statsmodels.stats.multitest.fdrcorrection(
pvals.flatten(), alpha=pthresh
)
sig = sig.reshape(pvals.shape)
corrected = corrected.reshape(pvals.shape)
# all non-significant values have a lower "alpha value" in the plot
mask_alpha_vals = sig.copy().astype(float)
mask_alpha_vals[mask_alpha_vals == 0] = alpha_val_mask
sig_masks_streams[..., istream] = mask_alpha_vals
# save pvals for reporting
corrected_pval_maps_streams[..., istream] = corrected
# NOTE: Need to lower alpha values of cells with corr <= 0
# that are still significant before plotting
# E.g., a cell significantly worsens correlation -> adjust alpha
# %%
# Plot grid per stream
mean_max_xys = []
mean_max_pvals = []
grids = []
scatters = []
max_coords_xy = np.full((2, len(STREAMS), len(SUBJS)), np.nan)
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
fig.tight_layout()
aspect = "equal"
for istream, stream in enumerate(STREAMS):
ax = axs.flat[istream]
# settings
cbarlabel = corr_method
vmin = None
vmax = max(
grid_streams_subjs[..., 0, :].mean(axis=-1).max(),
grid_streams_subjs[..., 1, :].mean(axis=-1).max(),
)
if subtract_maps:
cbarlabel = "Δ " + cbarlabel
vmin = 0
# Calculate subj wise maxima
for isub, sub in enumerate(SUBJS):
shape = grid_streams_subjs[..., istream, isub].shape
argmax = np.argmax(grid_streams_subjs[..., istream, isub])
max_coords_xy[..., istream, isub] = np.unravel_index(argmax, shape)[::-1]
# plot mean grid
grid_mean = np.mean(grid_streams_subjs[..., istream, :], axis=-1)
mask = sig_masks_streams[..., istream]
mask[grid_mean <= 0] = alpha_val_mask
grids.append((grid_mean, mask))
_ = ax.imshow(
grid_mean,
origin="upper",
interpolation="nearest",
vmin=vmin,
vmax=vmax,
alpha=mask,
aspect=aspect,
)
# tweak to get colorbar without alpha mask
_, tweak_ax = plt.subplots()
im = tweak_ax.imshow(
grid_mean,
origin="upper",
interpolation="nearest",
vmin=vmin,
vmax=vmax,
aspect=aspect,
)
plt.close(_)
# plot colorbar
cbar = fig.colorbar(
im, ax=ax, orientation="horizontal", label=cbarlabel, shrink=0.75
)
if subtract_maps:
uptick = (
max(im.get_array().max(), vmax)
if vmax is not None
else im.get_array().max()
)
cbar_ticks = np.linspace(0, uptick, 4)
cbar.set_ticks(cbar_ticks)
cbar.ax.set_xticklabels(["<=0"] + [f"{i:.2}" for i in cbar_ticks[1:]])
# plot subj maxima
_xs = max_coords_xy[..., istream, :][0]
_ys = max_coords_xy[..., istream, :][1]
scatters.append((_xs, _ys))
ax.scatter(
_xs,
_ys,
color="red",
s=4,
zorder=10,
)
# plot mean maximum
mean_max_xy = np.unravel_index(np.argmax(grid_mean), grid_mean.shape)[::-1]
mean_max_xys += [mean_max_xy]
mean_max_x, mean_max_y = mean_max_xy
mean_max_pvals += [corrected_pval_maps_streams[mean_max_x, mean_max_y, istream]]
ax.scatter(
mean_max_x,
mean_max_y,
color="red",
s=24,
marker="d",
zorder=10,
)
# lines
ax.axvline(idx_bias_zero, color="white", ls="--")
ax.axhline(idx_kappa_one, color="white", ls="--")
# titles
ylabel = "log (k)" if opt == 4 else "kappa (k)"
ax.set(
title=stream,
xlabel="bias (b)",
ylabel=ylabel,
)
title = f"Transparent mask shows significant values at p={pthresh} (FDR corrected)"
if subtract_maps:
title = (
"Improved model correlation relative to linear model (b=0, k=1)\n" + title
)
title = f"rdm_size={rdm_size}, orth={orth}\n" + title
fig.suptitle(title, y=1.15)
# ticks
if opt == 4:
xticks = [0, 65, 130]
ax.set_xticks(ticks=xticks)
ax.set_xticklabels(biases[np.array(xticks)])
yticks = [0, 65, 130]
ax.set_yticks(ticks=yticks)
ax.set_yticklabels(np.log(kappas)[np.array(yticks)])
else:
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
ax.yaxis.set_major_locator(plt.MaxNLocator(6))
xticklabels = (
[""]
+ [f"{i:.2f}" for i in biases[(ax.get_xticks()[1:-1]).astype(int)]]
+ [""]
)
yticklabels = (
[""]
+ [f"{i:.1f}" for i in kappas[(ax.get_yticks()[1:-1]).astype(int)]]
+ [""]
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", category=UserWarning, message="FixedFormatter .* FixedLocator"
)
ax.set(
xticklabels=xticklabels,
yticklabels=yticklabels,
)
# %%
# Save for publication plots
# single grid, single mask, dual grid, dual mask
savegrids = np.stack([grids[0][0], grids[0][1], grids[1][0], grids[1][1]])
np.save(fname_grids, savegrids)
# single xs, single ys, dual xs, dual ys
savescatters = np.stack(
[scatters[0][0], scatters[0][1], scatters[1][0], scatters[1][1]]
)
np.save(fname_scatters, savescatters)
# %%
# Plot single subj maps
for stream in STREAMS:
istream = STREAMS.index(stream)
fig, axs = plt.subplots(5, 6, figsize=(10, 10))
for isub, sub in enumerate(SUBJS):
grid = grid_streams_subjs[..., istream, isub]
ax = axs.flat[isub]
ax.imshow(grid)
_xs = max_coords_xy[..., istream, isub][0]
_ys = max_coords_xy[..., istream, isub][1]
ax.scatter(
_xs,
_ys,
color="red",
s=4,
zorder=10,
)
ax.set_axis_off()
fig.suptitle(f"Single subjects: {stream}")
# %%
# Save single subj bias and kappa maxima
dfs = []
for stream in STREAMS:
istream = STREAMS.index(stream)
bs = biases[max_coords_xy[0, istream, :].astype(int)]
ks = kappas[max_coords_xy[1, istream, :].astype(int)]
df = pd.DataFrame([bs, ks]).T
df.columns = ["bias", "kappa"]
df.insert(0, "stream", stream)
df.insert(0, "subject", SUBJS)
dfs.append(df)
df = pd.concat(dfs).sort_values(["subject", "stream"]).reset_index(drop=True)
df["rdm_size"] = rdm_size
df["subtract_maps"] = subtract_maps
df["orth"] = orth
df["corr_method"] = corr_method
df.insert(2, "mapmax_pval", | |
"""
dense_electrode.py
Class file for dense metal (e.g. Li) electrode methods
"""
import cantera as ct
from math import tanh
import numpy as np
class electrode():
"""
Create an electrode object representing the dense electrode
"""
def __init__(self, input_file, inputs, sep_inputs, counter_inputs,
electrode_name, params, offset):
"""
Initialize the model.
"""
# Import relevant Cantera objects.
self.bulk_obj = ct.Solution(input_file, inputs['bulk-phase'])
self.elyte_obj = ct.Solution(input_file, inputs['electrolyte-phase'])
self.conductor_obj = ct.Solution(input_file, inputs['conductor-phase'])
self.surf_obj = ct.Interface(input_file, inputs['surf-phase'],
[self.bulk_obj, self.elyte_obj, self.conductor_obj])
# Anode or cathode? Positive external current delivers positive charge
# to the anode, and removes positive charge from the cathode.
self.name = electrode_name
if self.name=='anode':
self.i_ext_flag = -1
elif self.name=='cathode':
self.i_ext_flag = 1
else:
raise ValueError("Electrode must be an anode or a cathode.")
# Store the species index of the Li ion in the Cantera object for the
# electrolyte phase:
self.index_Li = self.elyte_obj.species_index(inputs['mobile-ion'])
# Electrode thickness
self.dy = inputs['thickness']
# The electrode consumption rate quickly goes to zero, below a
# user-specified minimum thickness:
self.min_thickness = inputs['minimum-thickness']
# Interfacial surface area, per unit geometric area.
self.A_surf_ratio = inputs['A_surf_ratio']
# Inverse of the double layer capacitance, per unit interface area:
self.C_dl_Inv = 1/inputs['C_dl']
# Thickness of separator node considered as part of the anode domain.
# This is "subtracted" off from the total separator thickness.
self.dy_elyte = inputs['dy_elyte']
# Electrolyte volume fraction in the separator:
self.eps_elyte = sep_inputs['eps_electrolyte']
# Microstructure-based transport scaling factor, based on Bruggeman
# coefficient of -0.5:
self.elyte_microstructure = self.eps_elyte**1.5
# SV_offset specifies the index of the first SV variable for the
# electrode (zero for anode, n_vars_anode + n_vars_sep for the cathode)
self.SV_offset = offset
# Dense Li is not capacity-limiting, in and of itself. Rather the
# total amount of Li in the system is the limit. This is done in a
# separate routine, at a later time. Provide a large placeholder number
# here, so that it will not be the minimum, when evaluated later:
self.capacity = 1e21
# Number of state variables: electrode potential, electrolyte
# potential, thickness, electrolyte composition (n_species)
self.n_vars = 3 + self.elyte_obj.n_species
# This model produces one plot, for the electrode thickness:
self.n_plots = 1
# Set the Cantera object state.
self.bulk_obj.electric_potential = inputs['phi_0']
# If the user provided an initial composition, use that, here:
if 'X_0' in inputs:
self.bulk_obj.TPX = params['T'], params['P'], inputs['X_0']
else:
self.bulk_obj.TP = params['T'], params['P']
self.elyte_obj.TP = params['T'], params['P']
self.surf_obj.TP = params['T'], params['P']
self.conductor_obj.TP = params['T'], params['P']
def residual(self, SV, SVdot, sep, params):
"""
Define the residual for the state of the dense electrode.
This is an array of differential and algebraic governing equations, one for each state variable in the anode (anode plus a thin layer of electrolyte + separator).
1. The electric potential is an algebraic variable.
In the anode, phi = 0 is the reference potential for the system.
In the cathode, the electric potential must be such that the ionic current is spatially invariant (i.e. it is constant and equal to the external applied current, for galvanostatic simulations).
The residual corresponding to these variables (suppose an index 'j') are of the form:
resid[j] = (expression equaling zero)
2. All other variables are governed by differential equations.
We have a means to calculate dSV[j]/dt for a state variable SV[j] (state variable with index j).
The residuals corresponding to these variables will have the form:
resid[j] = SVdot[j] - (expression equaling dSV/dt)
Inputs:
- SV: the solution vector representing the state of the entire battery domain.
- SVdot: the time derivative of each state variable: dSV/dt
- electrode: the object representing the current electrode
- sep: the object representing the separator
- counter: the object representing the electrode counter to the current electrode
- params: dict of battery simulation parameters.
"""
# Initialize the residual array:
resid = np.zeros((self.n_vars,))
# Save local copies of the solution vectors, pointers for this electrode:
SVptr = self.SVptr
SV_loc = SV[SVptr['electrode']]
SVdot_loc = SVdot[SVptr['electrode']]
# Read electrode and electrolyte electric potentials:
phi_ed = SV_loc[SVptr['phi_ed']]
phi_elyte = phi_ed + SV_loc[SVptr['phi_dl']]
# Set electric potentials for Cantera objects:
self.bulk_obj.electric_potential = phi_ed
self.conductor_obj.electric_potential = phi_ed
self.elyte_obj.electric_potential = phi_elyte
# Multiplier on the electrode removal reaction. Quickly goes to zero, for
# thicknesses below a user-specified minimum:
mult = tanh(SV_loc[SVptr['thickness']]/self.min_thickness)
# Molar production rate of electrons in the 'conductor' phase. Electrons are created when lithium is consumed. We scale the electron creation rate, then, by our multiplier. When the anode thickness is below the minimum, the electron creation rate goes quickly to zero, but the electron destruction rate is unaffected:
sdot_electron = \
(mult*self.surf_obj.get_creation_rates(self.conductor_obj)
- self.surf_obj.get_destruction_rates(self.conductor_obj))
# Molar production rate of electrode species (kmol/m2/s). Here, we scale
# the destruction rate by our multiplier.
sdot_electrode = (self.surf_obj.get_creation_rates(self.bulk_obj)
- mult*self.surf_obj.get_destruction_rates(self.bulk_obj))
# Faradaic current density is positive when electrons are consumed
# (Li transferred to the anode)
i_Far = -ct.faraday*sdot_electron
# Double layer current has the same sign as i_Far:
i_dl = self.i_ext_flag*params['i_ext'] / self.A_surf_ratio - i_Far
# Differential equation for the double layer potential difference:
resid[SVptr['phi_dl']] = (SVdot_loc[SVptr['phi_dl']]
- i_dl*self.C_dl_Inv)
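# Equivalently (editorial note): d(phi_dl)/dt = i_dl / C_dl, written here in
# residual form using C_dl_Inv = 1/C_dl from __init__.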
# Flux of electrolyte species between the separator and the electrolyte in
# the current electrode domain:
N_k_sep, i_io = sep.electrode_boundary_flux(SV, self, params['T'])
# Electrode electric potential
if self.name=='anode':
# For the anode, the electric potential is an algebraic variable,
# always equal to zero:
resid[SVptr['phi_ed']] = SV_loc[SVptr['phi_ed']]
elif self.name=='cathode':
# The electric potential of the electrolyte in the cathode domain must
# be such that the ionic current from the separator to the cathode
# equals the external current:
resid[SVptr['phi_ed']] = i_io - params['i_ext']
# Change in thickness per time:
dH_dt = np.dot(sdot_electrode, self.bulk_obj.partial_molar_volumes)
resid[SVptr['thickness']] = SVdot_loc[SVptr['thickness']] - dH_dt
# Set time derivatives for electrolyte species concentrations to zero
# (temporary)
# Molar production rate of electrolyte species (kmol/m2/s). Here, we scale
# the creation rate by our multiplier.
sdot_electrolyte = \
(mult*self.surf_obj.get_creation_rates(self.elyte_obj)
- self.surf_obj.get_destruction_rates(self.elyte_obj))
# Double layer current removes Li from the electrolyte. Add this to
# sdot_electrolyte:
sdot_electrolyte[self.index_Li] -= i_dl / ct.faraday
dCk_elyte_dt = \
(sdot_electrolyte * self.A_surf_ratio
+ self.i_ext_flag * N_k_sep) / self.dy_elyte
resid[SVptr['C_k_elyte']] = SVdot_loc[SVptr['C_k_elyte']] - dCk_elyte_dt
return resid
def initialize(self, inputs, sep_inputs):
# Initialize the solution vector for the electrode domain:
SV = np.zeros([self.n_vars])
# Set up pointers to specific variables in the solution vector:
self.SVptr = {}
self.SVptr['phi_ed'] = np.array([0])
self.SVptr['phi_dl'] = np.array([1])
self.SVptr['thickness'] = np.array([2])
self.SVptr['C_k_elyte'] = np.arange(3,
3 + self.elyte_obj.n_species)
# There is only one node, but give the pointer a shape so that SVptr
# ['C_k_elyte'][j] accesses the pointer array:
self.SVptr['C_k_elyte'].shape = (1, self.elyte_obj.n_species)
# A pointer to where the SV variables for this electrode are, within the
# overall solution vector for the entire problem:
self.SVptr['electrode'] = np.arange(self.SV_offset,
self.SV_offset+self.n_vars)
# Save the SV indices of any algebraic variables:
self.algvars = self.SV_offset + self.SVptr['phi_ed'][:]
# Load initial state variable values:
SV[self.SVptr['phi_ed']] = inputs['phi_0']
SV[self.SVptr['phi_dl']] = sep_inputs['phi_0'] - inputs['phi_0']
SV[self.SVptr['thickness']] = inputs['thickness']
SV[self.SVptr['C_k_elyte']] = self.elyte_obj.concentrations
return SV
def voltage_lim(self, SV, val):
"""
Check to see if the voltage limits have been exceeded.
"""
# Save local copies of the solution vector and pointers for this electrode:
SVptr = self.SVptr
SV_loc = SV[SVptr['electrode']]
# Calculate the current voltage, relative to the limit. The simulation
# looks for instances where this value changes sign (i.e. crosses zero)
voltage_eval = SV_loc[SVptr['phi_ed']] - val
return voltage_eval
def adjust_separator(self, sep):
"""
The electrode domain considers the electrode object plus a thin layer of the separator, adjacent to the electrode. We subtract this thickness from the total separator thickness, so that we do not inadvertently increase the total
# -*- coding: utf-8 -*-
"""
module implementing the indicator class, which is designed as a MixIn for systems with net values
"""
import pandas as pd
from pyecharts.globals import CurrentConfig, NotebookType
from pyecharts import options as opts
from pyecharts.charts import Kline, Line, Bar, Grid
from pyecharts.commons.utils import JsCode
from xalpha.cons import line_opts, opendate, yesterdayobj, sqrt_days_in_year
def _upcount(ls):
"""
count the ratio of upmove days by given a list
"""
count = 0
for i in range(len(ls) - 1):
# somehow after pandas 0.23 (0.22?), the input is a Series (DataFrame?) and the old list-style indexing syntax is illegal
if ls.iloc[i + 1] > ls.iloc[i]:
count += 1
return count / (len(ls) - 1)
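# Example (editorial addition, not in the original source):
#   _upcount(pd.Series([1.0, 1.2, 1.1, 1.3]))  # 2 up-moves in 3 steps -> 2/3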
class indicator:
"""
MixIn class providing a quant indicator toolbox, designed as an interface for the mulfix class as well
as the info class, both of which are treated as a single fund with a price table of net values.
Most of the quant indexes, their name conventions, definitions and calculations are from
`joinquant <https://www.joinquant.com/help/api/help?name=api#%E9%A3%8E%E9%99%A9%E6%8C%87%E6%A0%87>`_.
Make sure first run obj.bcmkset() before you want to use functions in this class.
"""
def bcmkset(self, infoobj, start=None, riskfree=0.0371724, name="基金组合"):
"""
Once you want to utilize the indicator tool box for analysis, first run bcmkset function to set
the benchmark, otherwise most of the functions would raise error.
:param infoobj: info obj, whose netvalue are used as benchmark
:param start: datetime obj, indicating the starting date of all analysis.
Note that if the default start is used, there may be problems for some fundinfo objects, as lots of
funds lack net values for several days from our API, resulting in unequal lengths between
the benchmark and fund net values.
:param riskfree: float, annual rate in the unit of 100%; it is strongly suggested to make this value
consistent with the interest parameter when instantiating the cashinfo() class
"""
self._pricegenerate(name)
if start is None:
self.start = self.price.iloc[0].date
elif isinstance(start, str):
self.start = pd.to_datetime(
start, format="%Y-%m-%d"
) # pd.Timestamp.strptime(start, "%Y-%m-%d")
self.benchmark = infoobj
self.riskfree = riskfree
self.bmprice = self.benchmark.price[self.benchmark.price["date"] >= self.start]
self.price = self.price[self.price["date"] >= self.start]
self.bmprice = self.bmprice[self.bmprice["date"].isin(self.price["date"])]
self.price = self.price[self.price["date"].isin(self.bmprice["date"])]
# the price data is removed from the infoobj before start date
def _pricegenerate(self, name):
"""
generate price table for mulfix class, the cinfo class has this attr by default
"""
if getattr(self, "price", None) is None: # 基金组合类,而非基金信息类
times = pd.date_range(self.totcftable.iloc[0].date, yesterdayobj())
netvalue = []
for date in times:
netvalue.append(self.unitvalue(date)) # may take a long time
self.price = pd.DataFrame(data={"date": times, "netvalue": netvalue})
self.price = self.price[self.price["date"].isin(opendate)]
self.name = name
def comparison(self, date=yesterdayobj()):
"""
:returns: tuple of two pd.DataFrame, the first is for the aim and the second is for the benchmark index
all netvalues are normalized and set equal 1.00 on the self.start date
"""
partp = self.price[self.price["date"] <= date]
partm = self.bmprice[self.bmprice["date"] <= date]
normp = partp.iloc[0].netvalue
normm = partm.iloc[0].netvalue
partp["netvalue"] = partp["netvalue"] / normp
partm["netvalue"] = partm["netvalue"] / normm
return (partp, partm)
def total_return(self, date=yesterdayobj()):
return round(
(
self.price[self.price["date"] <= date].iloc[-1].netvalue
- self.price.iloc[0].netvalue
)
/ self.price.iloc[0].netvalue,
4,
)
@staticmethod
def annualized_returns(price, start, date=yesterdayobj()):
"""
:param price: price table of info().price
:param start: datetime obj for starting date of calculation
:param date: datetime obj for ending date of calculation
:returns: float, annualized returns of the price table
"""
datediff = (price[price["date"] <= date].iloc[-1].date - start).days
totreturn = (
price[price["date"] <= date].iloc[-1].netvalue - price.iloc[0].netvalue
) / price.iloc[0].netvalue
return round((1 + totreturn) ** (365 / datediff) - 1, 4)
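# Worked example (editorial addition, not from the original source): a net value
# rising from 1.00 to 1.10 over 182 days gives
# (1 + 0.10) ** (365 / 182) - 1 ~= 0.21, i.e. roughly 21% annualized.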
def total_annualized_returns(self, date=yesterdayobj()):
return indicator.annualized_returns(self.price, self.start, date)
def benchmark_annualized_returns(self, date=yesterdayobj()):
return indicator.annualized_returns(self.bmprice, self.start, date)
def pct_chg(self, freq="Y", benchmark=True):
"""
Yearly, monthly, and weekly percentage-change statistics
:param freq: str, default Y, could be M or W or anything pd.date_range accepts
:return: pd.DataFrame with columns date and pct_chg
"""
if getattr(self, "bmprice", None) is None:
benchmark = False
ydf = pd.merge_asof(
pd.DataFrame(
pd.date_range(
self.price["date"].iloc[0], self.price["date"].iloc[-1], freq=freq
),
columns=["date"],
),
self.price,
)
ydf["pct_chg"] = ydf["netvalue"].pct_change()
if benchmark:
ydf = pd.merge_asof(ydf, self.bmprice, on="date", suffixes=["", "_bc"])
ydf["pct_chg_benchmark"] = ydf["netvalue_bc"].pct_change()
ydf["pct_chg_difference"] = ydf["pct_chg"] - ydf["pct_chg_benchmark"]
return ydf[["date", "pct_chg", "pct_chg_benchmark", "pct_chg_difference"]]
return ydf[["date", "pct_chg"]]
def beta(self, date=yesterdayobj()):
bcmk = indicator.ratedaily(self.bmprice, date)
bt = indicator.ratedaily(self.price, date)
df = pd.DataFrame(data={"bcmk": bcmk, "bt": bt})
res = df.cov()
return res.loc["bcmk", "bt"] / res.loc["bcmk", "bcmk"]
def alpha(self, date=yesterdayobj()):
rp = self.total_annualized_returns(date)
rm = self.benchmark_annualized_returns(date)
beta = self.beta(date)
return rp - (self.riskfree + beta * (rm - self.riskfree))
def correlation_coefficient(self, date=yesterdayobj()):
"""
correlation coefficient between aim and benchmark values,
a good measure of how well an index fund tracks its benchmark
:returns: float between -1 and 1
"""
bcmk = indicator.ratedaily(self.bmprice, date)
bt = indicator.ratedaily(self.price, date)
df = pd.DataFrame(data={"bcmk": bcmk, "bt": bt})
res = df.cov()
return res.loc["bcmk", "bt"] / (
(res.loc["bcmk", "bcmk"] ** 0.5) * res.loc["bt", "bt"] ** 0.5
)
@staticmethod
def ratedaily(price, date=yesterdayobj()):
partp = price[price["date"] <= date]
return list(partp["netvalue"].pct_change())[1:]
# return [
# (partp.iloc[i + 1].netvalue - partp.iloc[i].netvalue)
# / partp.iloc[i].netvalue
# for i in range(len(partp) - 1)
# ]
@staticmethod
def volatility(price, date=yesterdayobj()):
df = pd.DataFrame(data={"rate": indicator.ratedaily(price, date)})
return df.std().rate * sqrt_days_in_year
def algorithm_volatility(self, date=yesterdayobj()):
return indicator.volatility(self.price, date)
def benchmark_volatility(self, date=yesterdayobj()):
return indicator.volatility(self.bmprice, date)
def sharpe(self, date=yesterdayobj()):
rp = self.total_annualized_returns(date)
return (rp - self.riskfree) / self.algorithm_volatility(date)
def information_ratio(self, date=yesterdayobj()):
rp = self.total_annualized_returns(date)
rm = self.benchmark_annualized_returns(date)
vp = indicator.ratedaily(self.price, date)
vm = indicator.ratedaily(self.bmprice, date)
diff = [vp[i] - vm[i] for i in range(len(vm))]
df = pd.DataFrame(data={"rate": diff})
var = df.std().rate
var = var * sqrt_days_in_year
return (rp - rm) / var
def max_drawdown(self, date=yesterdayobj()):
"""
Maximum drawdown over the backtest period
:param date: date obj or string
:returns: three elements tuple, the first two are the date obj of
start and end of the time window, the third one is the drawdown amplitude in unit 1.
"""
li = [
(row["date"], row["netvalue"])
for i, row in self.price[self.price["date"] <= date].iterrows()
]
res = []
for i, _ in enumerate(li):
for j in range(i + 1, len(li)):
res.append((li[i][0], li[j][0], (li[j][1] - li[i][1]) / li[i][1]))
return min(res, key=lambda x: x[2])
## The indicators above largely follow the portfolio-level quant metrics provided by JoinQuant; below are additional short-term technical indicators
def ma(self, window=5, col="netvalue"):
"""
Moving average (MA) indicator
give the moving average as a new column 'MA' in the price table, return None
:param window: the date window of the MA calculation
:param col: string, column name in dataframe you want to calculate
"""
self.price["MA" + str(window)] = self.price[col].rolling(window=window).mean()
def md(self, window=5, col="netvalue"):
"""
Moving standard deviation (MD) indicator
give the moving standard deviation as a new column 'MD' in the price table, return None
:param window: the date window of the MD calculation
:param col: string, column name in dataframe you want to calculate
"""
self.price["MD" + str(window)] = self.price[col].rolling(window=window).std()
def ema(self, window=5, col="netvalue"):
"""
Exponential moving average (EMA) indicator
give the exponential moving average as a new column 'EMA' in the price table, return None
:param window: the span of date, where the decay factor alpha=2/(1+window)
:param col: string, column name in dataframe you want to calculate
"""
self.price["EMA" + str(window)] = self.price[col].ewm(span=window).mean()
def macd(self, fast_window=12, slow_window=26, signal_window=9, col="netvalue"):
"""
Moving average convergence/divergence (MACD) indicator
give the MACD index as three new columns 'MACD_DIFF/DEM/OSC' in the price table, return None
:param fast_window: int,
:param slow_window: int,
:param signal_window: int, the ema window of the signal line
:param col: string, column name in dataframe you want to calculate
"""
EMAfast = pd.Series(self.price[col].ewm(span=fast_window).mean())
EMAslow = pd.Series(self.price[col].ewm(span=slow_window).mean())
# difference between the fast (short-window) EMA and the slow (long-window) EMA
MACDDiff = pd.Series(EMAfast - EMAslow)
# EMA of that difference (the signal line)
MACDDem = pd.Series(MACDDiff.ewm(span=signal_window).mean())
# difference between the raw DIFF line and its EMA (the oscillator/histogram)
MACDOsc = pd.Series(MACDDiff - MACDDem)
self.price["MACD_DIFF_" + str(fast_window) + "_" + str(slow_window)] = MACDDiff
self.price["MACD_DEM_" + str(fast_window) + "_" + str(slow_window)] = MACDDem
self.price["MACD_OSC_" + str(fast_window) + "_" + str(slow_window)] = MACDOsc
def mtm(self, window=10, col="netvalue"):
"""
Momentum (MTM) indicator; no moving average of the momentum is added here. To compute a momentum moving average, use the ma or ema function with the MTM column as the col parameter.
give the MTM as a new column 'MTM' in the price table, return None
:param window: int, the difference between price now and window days ago
:param col: string, column name in dataframe you want to calculate
"""
self.price["MTM" + str(window)] = self.price[col].diff(window)
def roc(self, window=10, col="netvalue"):
"""
Rate of change (ROC) indicator
give the ROC as a new column 'ROC' in the price table, return None, the ROC is in the unit of 1 instead of 1%
:param window: int, the change rate between price now and window days ago
:param col: string, column name in dataframe you want to calculate
#!/usr/bin/env python
#
# Copyright 2014 tigmi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kubernetes import simplejson
from kubernetes import TypeMeta
class Status(TypeMeta):
"""A Class representing the Status structure used by the kubernetes API
Status is a return value for calls that don't return other objects.
TODO: this could go in apiserver, but I'm including it here so clients needn't
import both.
The Status structure exposes the following properties:
Status.Status
Status.Message
Status.Reason
Status.Details
Status.Code
"""
def __init__(self, **kwargs):
'''An object to hold a Kubernetes Status.
Args:
Status:
One of: "Success", "Failure", "Working" (for operations not yet completed)
Message:
A human-readable description of the status of this operation.
Reason:
A machine-readable description of why this operation is in the
"Failure" or "Working" status. If this value is empty there
is no information available. A Reason clarifies an HTTP status
code but does not override it.
Details:
Extended data associated with the reason. Each reason may define its
own extended details. This field is optional and the data returned
is not guaranteed to conform to any schema except that defined by
the reason type.
Code:
Suggested HTTP return code for this status, 0 if not set.
'''
param_defaults = {
'Status': None,
'Message': None,
'Reason': None,
'Details': None,
'Code': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
super(Status, self).__init__(**kwargs)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.Status == other.Status and \
self.Message == other.Message and \
self.Reason == other.Reason and \
self.Details == other.Details and \
self.Code == other.Code and \
super(Status, self).__eq__(other)
except AttributeError:
return False
def __str__(self):
'''A string representation of this Kubernetes.Status instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.Status instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.Status instance.
Returns:
A JSON string representation of this kubernetes.Status instance.
'''
return simplejson.dumps(dict(self.AsDict().items()+super(Status, self).AsDict().items()), sort_keys=True)
def AsDict(self):
        ''' A dict representation of this kubernetes.Status instance.
        The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.Status instance
'''
data = {}
if self.Status:
data['status'] = self.Status
if self.Message:
data['message'] = self.Message
if self.Reason:
data['reason'] = self.Reason
if self.Details:
data['details'] = self.Details.AsDict()
if self.Code:
data['code'] = self.Code
return data
@staticmethod
def NewFromJsonDict(data):
        '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.Status instance
'''
details = None
if 'details' in data:
from kubernetes import StatusDetails
details = StatusDetails.NewFromJsonDict(data['details'])
return Status(
Kind=data.get('kind', None),
ID=data.get('id', None),
UID=data.get('uid', None),
CreationTimestamp=data.get('creationTimestamp', None),
SelfLink=data.get('selfLink', None),
ResourceVersion=data.get('resourceVersion', None),
APIVersion=data.get('apiVersion', None),
Namespace=data.get('namespace', None),
Annotations=data.get('annotations', None),
Status=data.get('status', None),
Message=data.get('message', None),
Reason=data.get('reason', None),
Details=details,
Code=data.get('code', None))
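# Illustrative sketch (not part of the original module): building a Status from
# a minimal JSON dict and serializing it back. The sample dict values are
# assumptions for demonstration only.
def _example_status_from_json(json_dict=None):
    if json_dict is None:
        json_dict = {'kind': 'Status', 'status': 'Failure', 'reason': 'NotFound',
                     'code': 404, 'message': 'resource not found'}
    status = Status.NewFromJsonDict(json_dict)
    # Round-trips through the AsDict()/AsJsonString() methods defined above.
    return status.AsJsonString()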
class StatusDetails(object):
"""A Class representing the StatusDetails structure used by the kubernetes API
StatusDetails is a set of additional properties that MAY be set by the
server to provide additional information about a response. The Reason
field of a Status object defines what attributes will be set. Clients
must ignore fields that do not match the defined type of each attribute,
and should assume that any attribute may be empty, invalid, or under
defined.
The StatusDetails structure exposes the following properties:
StatusDetails.ID
StatusDetails.Kind
StatusDetails.Causes
"""
def __init__(self, **kwargs):
        '''An object to hold a Kubernetes StatusDetails.
        Args:
ID:
The ID attribute of the resource associated with the status StatusReason
(when there is a single ID which can be described).
Kind:
The kind attribute of the resource associated with the status StatusReason.
On some operations may differ from the requested resource Kind.
Causes:
The Causes array includes more details associated with the StatusReason
failure. Not all StatusReasons may provide detailed causes.
'''
param_defaults = {
'ID': None,
'Kind': None,
'Causes': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.ID == other.ID and \
self.Kind == other.Kind and \
self.Causes == other.Causes
except AttributeError:
return False
def __str__(self):
'''A string representation of this Kubernetes.StatusDetails instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.StatusDetails instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.StatusDetails instance.
Returns:
A JSON string representation of this kubernetes.StatusDetails instance.
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
        ''' A dict representation of this kubernetes.StatusDetails instance.
        The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.StatusDetails instance
'''
data = {}
if self.ID:
data['id'] = self.ID
if self.Kind:
data['kind'] = self.Kind
if self.Causes:
data['causes'] = [cause.AsDict() for cause in self.Causes]
return data
@staticmethod
def NewFromJsonDict(data):
        '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.StatusDetails instance
'''
causes = None
if 'causes' in data:
from kubernetes import StatusCause
causes = [StatusCause.NewFromJsonDict(cause) for cause in data['causes']]
return StatusDetails(
ID=data.get('id', None),
Kind=data.get('kind', None),
Causes=causes)
class StatusReason(object):
"""
StatusReason is an enumeration of possible failure causes. Each StatusReason
must map to a single HTTP status code, but multiple reasons may map
to the same HTTP status code.
TODO: move to apiserver
"""
'''
StatusReasonUnknown means the server has declined to indicate a specific reason.
The details field may contain other information about this error.
Status code 500.
'''
StatusReasonUnknown = ""
'''
StatusReasonWorking means the server is processing this request and will complete
at a future time.
Details (optional):
"kind" string - the name of the resource being referenced ("operation" today)
"id" string - the identifier of the Operation resource where updates
will be returned
Headers (optional):
"Location" - HTTP header populated with a URL that can retrieved the final
status of this operation.
Status code 202
'''
StatusReasonWorking = "StatusReasonWorking"
'''
StatusReasonNotFound means one or more resources required for this operation
could not be found.
Details (optional):
"kind" string - the kind attribute of the missing resource
on some operations may differ from the requested
resource.
"id" string - the identifier of the missing resource
Status code 404
'''
StatusReasonNotFound = "NotFound"
'''
StatusReasonAlreadyExists means the resource you are creating already exists.
Details (optional):
"kind" string - the kind attribute of the conflicting resource
"id" string - the identifier of the conflicting resource
Status code 409
'''
StatusReasonAlreadyExists = "AlreadyExists"
'''
StatusReasonConflict means the requested update operation cannot be completed
due to a conflict in the operation. The client may need to alter the request.
Each resource may define custom details that indicate the nature of the
conflict.
Status code 409
'''
StatusReasonConflict = "Conflict"
'''
StatusReasonInvalid means the requested create or update operation cannot be
completed due to invalid data provided as part of the request. The client may
need to alter the request. When set, the client may use the StatusDetails
message field as a summary of the issues encountered.
Details (optional):
"kind" string - the kind attribute of the invalid resource
"id" string - the identifier of the invalid resource
"causes" - one or more StatusCause entries indicating the data in the
provided resource that was invalid. The code, message, and
field attributes will be set.
Status code 422
'''
StatusReasonInvalid = "Invalid"
class StatusCause(object):
"""A Class representing the StatusCause | |
    #: The name of the environment (`None` for the base environment)
name: Optional[str]
@property
def conda_exe(self) -> Path:
"""The path to the Conda executable"""
if ON_WINDOWS:
return self.basepath / "Scripts" / "conda.exe"
else:
return self.basepath / "bin" / "conda"
@property
def bindir(self) -> Path:
"""
The directory in which command-line programs provided by packages are
installed
"""
dirname = "Scripts" if ON_WINDOWS else "bin"
if self.name is None:
return self.basepath / dirname
else:
return self.basepath / "envs" / self.name / dirname
#: A list of command names and the paths at which they are located
CommandList = List[Tuple[str, Path]]
class DataladInstaller:
"""The script's primary class, a manager & runner of components"""
COMPONENTS: ClassVar[Dict[str, Type["Component"]]] = {}
OPTION_PARSER = OptionParser(
help="Installation script for Datalad and related components",
options=[
Option(
"-V",
"--version",
is_flag=True,
immediate=VersionRequest(),
help="Show program version and exit",
),
Option(
"-l",
"--log-level",
converter=parse_log_level,
metavar="LEVEL",
help="Set logging level [default: INFO]",
),
Option(
"-E",
"--env-write-file",
converter=Path,
multiple=True,
help=(
"Append PATH modifications and other shell commands to the"
" given file; can be given multiple times"
),
),
Option(
"--sudo",
choices=[v.value for v in SudoConfirm],
converter=SudoConfirm,
help="How to handle sudo commands [default: ask]",
),
],
)
def __init__(
self,
env_write_files: Optional[List[Union[str, os.PathLike]]] = None,
sudo_confirm: SudoConfirm = SudoConfirm.ASK,
) -> None:
#: A list of files to which to write ``PATH`` modifications and related
#: shell commands
self.env_write_files: List[Path]
if env_write_files is None:
self.env_write_files = []
else:
self.env_write_files = [Path(p) for p in env_write_files]
self.sudo_confirm: SudoConfirm = sudo_confirm
#: The default installers to fall back on for the "auto" installation
#: method
self.installer_stack: List["Installer"] = [
# Lowest priority first
DataladPackagesBuildInstaller(self),
AutobuildInstaller(self),
HomebrewInstaller(self),
NeurodebianInstaller(self),
AptInstaller(self),
CondaInstaller(self),
]
#: A stack of Conda installations & environments installed via the
#: instance
self.conda_stack: List[CondaInstance] = []
#: A list of commands installed via the instance
self.new_commands: CommandList = []
#: Whether "brew update" has been run
self.brew_updated: bool = False
@classmethod
def register_component(
cls, name: str
) -> Callable[[Type["Component"]], Type["Component"]]:
"""A decorator for registering concrete `Component` subclasses"""
def decorator(component: Type["Component"]) -> Type["Component"]:
cls.COMPONENTS[name] = component
return component
return decorator
def __enter__(self) -> "DataladInstaller":
return self
def __exit__(self, exc_type: Any, _exc_value: Any, _exc_tb: Any) -> None:
if exc_type is None:
# Ensure env write files at least exist
for p in self.env_write_files:
p.touch()
def ensure_env_write_file(self) -> None:
"""If there are no env write files registered, add one"""
if not self.env_write_files:
fd, fpath = tempfile.mkstemp(prefix="dl-env-", suffix=".sh")
os.close(fd)
log.info("Writing environment modifications to %s", fpath)
self.env_write_files.append(Path(fpath))
def sudo(self, *args: Any, **kwargs: Any) -> None:
arglist = [str(a) for a in args]
cmd = " ".join(map(shlex.quote, arglist))
if ON_WINDOWS:
# The OS will ask the user for confirmation anyway, so there's no
# need for us to ask anything.
log.info("Running as administrator: %s", " ".join(arglist))
ctypes.windll.shell32.ShellExecuteW( # type: ignore[attr-defined]
None, "runas", arglist[0], " ".join(arglist[1:]), None, 1
)
else:
if self.sudo_confirm is SudoConfirm.ERROR:
log.error("Not running sudo command: %s", cmd)
sys.exit(1)
elif self.sudo_confirm is SudoConfirm.ASK:
print("About to run the following command as an administrator:")
print(f" {cmd}")
yan = ask("Proceed?", ["y", "a", "n"])
if yan == "n":
sys.exit(0)
elif yan == "a":
self.sudo_confirm = SudoConfirm.OK
runcmd("sudo", *args, **kwargs)
def run_maybe_elevated(self, *args: Any, **kwargs: Any) -> None:
try:
runcmd(*args, **kwargs)
except OSError as e:
if e.winerror == 740: # type: ignore[attr-defined]
log.info("Operation requires elevation; rerunning as administrator")
self.sudo(*args, **kwargs)
else:
raise
@classmethod
def parse_args(cls, args: List[str]) -> Union[Immediate, ParsedArgs]:
"""
Parse all command-line arguments.
:param List[str] args: command-line arguments without ``sys.argv[0]``
"""
r = cls.OPTION_PARSER.parse_args(args)
if isinstance(r, Immediate):
return r
global_opts, leftovers = r
components: List[ComponentRequest] = []
while leftovers:
c = leftovers.pop(0)
name, eq, version = c.partition("=")
if not name:
raise UsageError("Component name must be nonempty")
try:
component = cls.COMPONENTS[name]
except KeyError:
raise UsageError(f"Unknown component: {name!r}")
cparser = component.OPTION_PARSER
if version and not cparser.versioned:
raise UsageError(f"{name} component does not take a version", name)
if eq and not version:
raise UsageError("Version must be nonempty", name)
cr = cparser.parse_args(leftovers)
if isinstance(cr, Immediate):
return cr
kwargs, leftovers = cr
if version:
kwargs["version"] = version
components.append(ComponentRequest(name=name, **kwargs))
return ParsedArgs(global_opts, components)
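    # Illustrative example of the component syntax parsed above: an argument
    # list such as ["-E", "env.sh", "venv", "--path", "/tmp/venv", "datalad=0.15.0"]
    # yields a ComponentRequest for "venv" (path=/tmp/venv) followed by one for
    # "datalad" with version="0.15.0" -- assuming the "datalad" component is
    # registered with a versioned option parser.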
def main(self, argv: Optional[List[str]] = None) -> int:
"""
        Parse command-line arguments and perform the requested actions.
Returns 0 if everything was OK, nonzero otherwise.
:param List[str] argv: command-line arguments, including
``sys.argv[0]``
"""
if argv is None:
argv = sys.argv
progname, *args = argv
if not progname:
progname = "datalad-installer"
else:
progname = Path(progname).name
try:
r = self.parse_args(args)
except UsageError as e:
print(self.short_help(progname, e.component), file=sys.stderr)
print(file=sys.stderr)
print(str(e), file=sys.stderr)
return 2
if isinstance(r, VersionRequest):
print("datalad-installer", __version__)
return 0
elif isinstance(r, HelpRequest):
print(self.long_help(progname, r.component))
return 0
else:
assert isinstance(r, ParsedArgs)
global_opts, components = r
if not components:
components = [ComponentRequest("datalad")]
logging.basicConfig(
format="%(asctime)s [%(levelname)-8s] %(name)s %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S%z",
level=global_opts.pop("log_level", logging.INFO),
)
if global_opts.get("env_write_file"):
self.env_write_files.extend(global_opts["env_write_file"])
self.ensure_env_write_file()
if global_opts.get("sudo"):
self.sudo_confirm = global_opts["sudo"]
for cr in components:
self.addcomponent(name=cr.name, **cr.kwargs)
ok = True
for name, path in self.new_commands:
log.info("%s is now installed at %s", name, path)
if not os.path.exists(path):
log.error("%s does not exist!", path)
ok = False
elif not ON_WINDOWS and not os.access(path, os.X_OK):
log.error("%s is not executable!", path)
ok = False
else:
try:
sr = subprocess.run(
[str(path), "--help"], stdout=subprocess.DEVNULL
)
except Exception as e:
log.error("Failed to run `%s --help`: %s", path, e)
ok = False
else:
if sr.returncode != 0:
log.error("`%s --help` command failed!", path)
ok = False
return 0 if ok else 1
def addenv(self, line: str) -> None:
"""Write a line to the env write files"""
log.debug("Adding line %r to env_write_files", line)
for p in self.env_write_files:
with p.open("a") as fp:
print(line, file=fp)
def addpath(self, p: Union[str, os.PathLike], last: bool = False) -> None:
"""
Add a line to the env write files that prepends (or appends, if
``last`` is true) a given path to ``PATH``
"""
path = Path(p).resolve()
if not last:
line = f'export PATH={shlex.quote(str(path))}:"$PATH"'
else:
line = f'export PATH="$PATH":{shlex.quote(str(path))}'
self.addenv(line)
def addcomponent(self, name: str, **kwargs: Any) -> None:
"""Provision the given component"""
try:
component = self.COMPONENTS[name]
        except KeyError:
raise ValueError(f"Unknown component: {name}")
component(self).provide(**kwargs)
def get_conda(self) -> CondaInstance:
"""
Return the most-recently created Conda installation or environment. If
there is no such instance, return an instance for an
externally-installed Conda installation, raising an error if none is
found.
"""
if self.conda_stack:
return self.conda_stack[-1]
else:
conda_path = shutil.which("conda")
if conda_path is not None:
basepath = Path(readcmd(conda_path, "info", "--base").strip())
return CondaInstance(basepath=basepath, name=None)
else:
raise RuntimeError("conda not installed")
@classmethod
def short_help(cls, progname: str, component: Optional[str] = None) -> str:
if component is None:
return cls.OPTION_PARSER.short_help(progname)
else:
return cls.COMPONENTS[component].OPTION_PARSER.short_help(progname)
@classmethod
def long_help(cls, progname: str, component: Optional[str] = None) -> str:
if component is None:
s = cls.OPTION_PARSER.long_help(progname)
s += "\n\nComponents:"
width = max(map(len, cls.COMPONENTS.keys()))
for name, cmpnt in sorted(cls.COMPONENTS.items()):
if cmpnt.OPTION_PARSER.help is not None:
chelp = cmpnt.OPTION_PARSER.help
else:
chelp = ""
s += (
f"\n{' ' * HELP_INDENT}{name:{width}}{' ' * HELP_GUTTER}"
+ textwrap.shorten(chelp, HELP_WIDTH - width - HELP_GUTTER)
)
return s
else:
return cls.COMPONENTS[component].OPTION_PARSER.long_help(progname)
class Component(ABC):
"""
An abstract base class for a component that can be specified on the command
line and provisioned
"""
OPTION_PARSER: ClassVar[OptionParser]
def __init__(self, manager: DataladInstaller) -> None:
self.manager = manager
@abstractmethod
def provide(self, **kwargs: Any) -> None:
...
@DataladInstaller.register_component("venv")
class VenvComponent(Component):
"""Creates a Python virtual environment using ``python -m venv``"""
OPTION_PARSER = OptionParser(
"venv",
versioned=False,
help="Create a Python virtual environment",
options=[
Option(
"--path",
converter=Path,
metavar="PATH",
help="Create the venv at the given path",
),
Option(
"-e",
"--extra-args",
converter=shlex.split,
help="Extra arguments to pass to the venv command",
),
# For use in testing against the dev version of pip:
Option(
"--dev-pip",
is_flag=True,
help="Install the development version of pip from GitHub",
),
],
)
def provide(
self,
path: Optional[Path] = None,
extra_args: Optional[List[str]] = None,
dev_pip: bool = False,
**kwargs: Any,
) -> None:
log.info("Creating a virtual environment")
if path is None:
path = mktempdir("dl-venv-")
log.info("Path: %s", path)
log.info("Extra args: %s", extra_args)
if kwargs:
log.warning("Ignoring extra component arguments: %r", kwargs)
### TODO: Handle systems on which venv isn't installed
        # NOTE: the original body is truncated here; the invocation below is an
        # assumed sketch based on the surrounding code and option names.
        cmd = [sys.executable, "-m", "venv"]
        if extra_args is not None:
            cmd.extend(extra_args)
        cmd.append(str(path))
        runcmd(*cmd)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 13:44:19 2018
@author: sven
"""
import scipy.stats as sps
import scipy.spatial as spp
import numpy as np
import copy
from ..utils.utils import MyException
def trim_mean(x,trimming,axis=0):
"""
computes the trimmed mean of array x according to axis.
Input :
x : input data as numpy array
trimming, float : trimming percentage to be used
axis, int or None : Axis along which the trimmed means are computed
Output:
The trimmed mean of x according to axis.
"""
if trimming == 0:
return(np.mean(x,axis=axis))
else:
return(sps.trim_mean(x,trimming,axis=axis))
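# Quick illustration of trim_mean: for x = np.array([1, 2, 3, 4, 100]),
# trim_mean(x, 0) is the plain mean 22.0, whereas trim_mean(x, 0.2) cuts one
# observation from each tail and returns 3.0, so the outlier is discarded.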
def trimvar(x,trimming):
"""
computes the trimmed variance of array x .
Input :
x : input data as numpy array
trimming, float : trimming percentage to be used
Output:
The trimmed variance of x.
"""
# division by n
return(sps.trim_mean(np.square(x - sps.trim_mean(x,trimming)),trimming))
def identity(x):
return(x)
def trim_mom(x,y,locest,order,trimming,option,fscorr=True):
"""
computes trimmed comoment between x and y. order represents the order of
the comoment.
input :
x : Input data as matrix
y : Input data as matrix or 1d vector
order, int : order of the comoment
trimming, float : trimming percentage to be used.
option, int : option to select the type of co-moment (order 3: option 1 = com(x,x,y))
        fscorr, bool : if True, a finite sample correction is applied to the comoment.
output :
the trimmed comoment between x and y
"""
# division by n
if order == 0:
como = 0
elif order == 1:
como = locest(x,trimming)
else:
if order > 2:
iter_stop_2 = option
iter_stop_1 = order - option
else:
iter_stop_1 = 1
iter_stop_2 = 1
if locest == np.median:
trimming = 0
factor = 1
if (x==y).all():
wrapper = abs
power = 1/order
if power == 0.5:
factor = 1.4826
else:
wrapper = identity
power = 1
else:
n = len(x)
wrapper = identity
power = 1
if fscorr:
ntrim = round(n * (1-trimming))
factor = ntrim
factor /= np.product(ntrim - np.arange(max(1,order-2),order))
else:
factor = 1
xc = wrapper(x - locest(x,trimming))
yc = wrapper(y - locest(y,trimming))
factor1 = np.power(xc,iter_stop_1)
factor2 = np.power(yc,iter_stop_2)
como = locest(np.power(np.multiply(factor1,factor2),power),trimming)*factor
# como = sps.trim_mean(np.multiply(x - sps.trim_mean(x,trimming),y - sps.trim_mean(y,trimming)),trimming)*ntrim/(ntrim-1)
if len(como.shape)>1:
como = como[0,0]
else:
if type(como) is np.ndarray:
como = como[0]
return(como)
def double_center_flex(a, center='mean', **kwargs):
"""
Double centered function adapted to accommodate for location types different
from mean.
Input :
a : input data as matrix
        center, str : which location estimate to use for centering. either 'mean' or 'median'
kwargs :
trimming, float : trimming percentage to be used.
biascorr, bool : if True, bias correction is applied during double centering.
Output :
The double centered version of the matrix a.
"""
# print(kwargs)
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
# print('trimming is: ' + str(trimming))
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
out = copy.deepcopy(a)
dim = np.size(a, 0)
n1 = dim
# mu = np.sum(a) / (dim * dim)
if center=='mean':
mu = trim_mean(a.reshape((dim**2,1)),trimming)
if biascorr:
n1 = np.round(dim*(1-trimming))
# print(n1)
mu *= (n1**2) / ((n1-1) * (n1-2))
mu_cols = trim_mean(a, trimming, axis=0).reshape((1,dim))
mu_rows = trim_mean(a, trimming, axis=1).reshape((dim,1))
if biascorr:
mu_cols *= n1/(n1 - 2)
mu_rows *= n1/(n1 - 2)
mu_cols = np.ones((dim, 1)).dot(mu_cols)
mu_rows = mu_rows.dot(np.ones((1, dim)))
elif center=='median':
mu = np.median(a.reshape((dim**2,1)))
mu_cols = np.median(a,axis=0).reshape((1,dim))
mu_rows = np.median(a,axis=1).reshape((dim,1))
mu_cols = np.ones((dim, 1)).dot(mu_cols)
mu_rows = mu_rows.dot(np.ones((1, dim)))
else:
raise(ValueError('Center should be mean or median'))
# Do one operation at a time, to improve broadcasting memory usage.
out -= mu_rows
out -= mu_cols
out += mu
if biascorr:
out[np.eye(dim, dtype=bool)] = 0
return out,n1
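# For center='mean', trimming=0 and biascorr=False, the double centering above
# reduces to the classical formula
#   out[i, j] = a[i, j] - rowmean_i - colmean_j + grandmean,
# so every row and every column of the returned matrix sums to zero.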
def distance_matrix_centered(x,**kwargs):
"""
Computes the trimmed double centered distance matrix of x.
Input :
x : input data as matrix.
kwargs :
trimming, float : trimming percentage to be used.
biascorr, bool : if True, bias correction is applied during double centering.
        center, str : which location estimate to use for centering. either 'mean' or 'median'
dmetric, str : which distance metric to use. Default is euclidean distance.
Output :
the trimmed double centered distance matrix of x
"""
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
if 'center' not in kwargs:
center = 'mean'
else:
center = kwargs.get('center')
if 'dmetric' not in kwargs:
dmetric = 'euclidean'
else:
dmetric = kwargs.get('dmetric')
dx = spp.distance.squareform(spp.distance.pdist(x,metric=dmetric))
dmx, n1 = double_center_flex(dx,biascorr=biascorr,
trimming=trimming,center=center)
return dmx,n1
def distance_moment(dmx,dmy,**kwargs):
"""
Computes the trimmed distance comoment between x and y based on their distance matrices.
Input :
dmx : distance matrix of x
dmy : distance matrix of y
kwargs :
trimming, float : trimming percentage to be used.
biascorr, bool : if True, bias correction is applied during double centering.
        center, str : which location estimate to use for centering. either 'mean' or 'median'
dmetric, str : which distance metric to use. Default is euclidean distance.
order, int : order of the comoment to be computed, default is 2 for covariance.
option, int : option to be used during the computation.
Output :
The trimmed distance comoment between x and y
"""
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
if 'center' not in kwargs:
center = 'mean'
else:
center = kwargs.get('center')
if 'order' not in kwargs:
order = 2
else:
order = kwargs.get('order')
if order > 2:
if 'option' not in kwargs:
option = 1
else:
option = kwargs.get('option')
iter_stop_2 = option
iter_stop_1 = order - option
else:
option = 0
iter_stop_1 = 1
iter_stop_2 = 1
nx = dmx.shape[0]
ny = dmy.shape[0]
if nx!=ny:
raise(ValueError)
if biascorr:
if trimming == 0:
n1 = nx
elif 'n1' not in kwargs:
raise(MyException('n1 needs to be provided when correcting for bias'))
else:
n1 = kwargs.get('n1')
corr4bias = n1**2/(n1*(n1-3))
else:
corr4bias = 1
if order>2:
i = 1
while i < iter_stop_1:
dmx *= dmx
i += 1
i = 1
while i < iter_stop_2:
dmy *= dmy
i += 1
if center=='mean':
moment = trim_mean((dmx*dmy).reshape((nx**2,1)),trimming)
moment *= corr4bias
moment = moment[0]
moment = (-1)**order*abs(moment)**(1/order)
elif center=='median':
moment = np.median(dmx*dmy)
return(moment)
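def _example_distance_moment(n=40, seed=0):
    """
    Illustrative sketch, not part of the original module: the order-2 trimmed
    distance moment between two independent random samples, computed with the
    helpers defined above. The sample sizes and seed are arbitrary.
    """
    rng = np.random.RandomState(seed)
    x = rng.randn(n, 2)
    y = rng.randn(n, 2)
    dmx, _ = distance_matrix_centered(x)
    dmy, _ = distance_matrix_centered(y)
    return distance_moment(dmx, dmy, order=2)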
def difference_divergence(X,Y,**kwargs):
"""
This function computes the (U)Martingale Difference Divergence of Y given X.
input :
X : A matrix or data frame, where rows represent samples, and columns represent variables.
Y : The response variable or matrix.
        biascorr, bool : if True, uses U centering to produce an unbiased estimator of MDD
        trimming, float : trimming percentage to be used
        center, str : which location estimate to use for centering. either 'mean' or 'median'
        dmetric, str : which distance metric to use; default is euclidean distance
output:
returns the squared martingale difference divergence of Y given X.
"""
if 'trimming' not in kwargs:
trimming = 0
else:
trimming = kwargs.get('trimming')
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
if 'center' not in kwargs:
center = 'mean'
else:
center = kwargs.get('center')
if 'dmetric' not in kwargs:
dmetric = 'euclidean'
else:
dmetric = kwargs.get('dmetric')
A, Adim = distance_matrix_centered(X,biascorr=biascorr,trimming=trimming,center=center)
dy= spp.distance.squareform(spp.distance.pdist(Y.reshape(-1, 1),metric=dmetric)**2)
B,Bdim = double_center_flex(0.5*dy,biascorr=biascorr,trimming=trimming,center=center)
if biascorr:
return(U_inner(A,B,trimming))
else:
return(D_inner(A,B,trimming))
def U_inner(X,Y,trimming=0):
"""
Computes the inner product in the space of U centered matrices, between matrices X and Y.
The matrices have to be square matrices.
"""
nx = X.shape[0]
ny = Y.shape[0]
if nx != ny:
raise(MyException('Please feed x and y data of equal length'))
#((1/(nx*(nx-3))) *(np.sum(arr)))
arr= np.multiply(X,Y)
arr=arr.flatten()
    # The source is cut off here; `len(arr)` is an assumed completion of this
    # line, mirroring scipy's proportiontocut logic.
    lowercut = int(trimming * len(arr))
is written into output_tar (as a tar file)
"""
stopwatch.sw.start('write_output')
base.MakeDirs(os.path.join(self._temp_dir, OUTPUT_DIR))
base.MakeDirs(os.path.join(self._temp_dir, MODIFIED_DIR))
for file_obj in self.files:
# We want to be able to show all the modifications in one place.
      # Therefore, each file shows up in multiple places.
# 0) the output tree
# 1) the tree of original copies (if modified)
# 2) the tree of modified versions (if modified)
# 3) the diff between original and modified (if modified)
# 4) the initial source tree we were asked to modify (if modify in place)
# 5) the tarball of the output
# 0: write the possibly-modified file to output tree
if file_obj.is_deleted:
modified_filename = '/dev/null'
else:
output_filename = os.path.join(
self._temp_dir,
OUTPUT_DIR,
file_obj.output_relative_filename)
base.MakeDirs(os.path.dirname(output_filename))
file_obj.WriteToFile(output_filename)
if file_obj.is_modified:
# 1: write the original file to the originals tree
original_filename = os.path.join(
self._temp_dir,
ORIGINAL_DIR,
file_obj.relative_filename)
base.MakeDirs(os.path.dirname(original_filename))
file_obj.WriteToFile(original_filename, original=True)
# 2: write the modified file to the modified tree
if file_obj.is_deleted:
modified_filename = '/dev/null'
else:
modified_filename = os.path.join(
self._temp_dir,
MODIFIED_DIR,
file_obj.output_relative_filename)
base.MakeDirs(os.path.dirname(modified_filename))
file_obj.WriteToFile(modified_filename)
# 3: write the diff
diff_filename = os.path.join(
self._temp_dir,
DIFFS_DIR,
file_obj.relative_filename)
base.MakeDirs(os.path.dirname(diff_filename))
p = subprocess.Popen(
['diff', original_filename, modified_filename],
stdout=open(diff_filename, 'w'),
stderr=open('/dev/null', 'w'))
p.wait()
if self.config.modify:
# 4: write the modified file to the initial tree
if file_obj.is_deleted:
os.remove(file_obj.filename)
print 'Deleted', file_obj.filename
else:
tmp_filename = file_obj.filename + '.tmp'
file_obj.WriteToFile(tmp_filename)
os.rename(tmp_filename, file_obj.filename)
print 'Modified', file_obj.filename
# 5: create output tar
if self.config.output_tar:
# Calling out to tar instead of using python's tarfile is 400x faster.
p = subprocess.Popen(
['tar', '-cf', self.config.output_tar,
'-C', os.path.join(self._temp_dir, OUTPUT_DIR), '.'])
p.wait()
if p.returncode:
self.AddError('tar finished unsuccessfully')
stopwatch.sw.stop('write_output')
def CleanUp(self):
shutil.rmtree(self._temp_dir, ignore_errors=True)
def RelativeFilename(self, filename):
result = os.path.abspath(filename).replace(
self.config.codebase, '', 1)
if result[0] == '/':
result = result[1:]
return result
def FindFiles(self, config):
"""Find all files to scrub in the codebase.
Args:
config: ScrubberConfig
Returns:
seq of ScannedFile, the filenames to scan
"""
result = []
stopwatch.sw.start('find')
if config.rearranging_config:
file_renamer = renamer.FileRenamer(config.rearranging_config)
else:
file_renamer = None
for full_filename in config.input_files:
relative_filename = self.RelativeFilename(full_filename)
if self.config.ignore_files_re.search(relative_filename):
continue
if file_renamer:
output_relative_filename = file_renamer.RenameFile(relative_filename)
else:
output_relative_filename = relative_filename
result.append(ScannedFile(
full_filename, relative_filename, self.GetScratchDir(),
output_relative_filename=output_relative_filename))
stopwatch.sw.stop('find')
return result
def _GetExtension(self, filename):
basename = os.path.basename(filename)
for filename_re, extension in self.config.extension_map:
if filename_re.search(filename):
return extension
_, extension = os.path.splitext(basename)
# If there is no extension, then it may be a dotfile, such as .hgignore.
if not extension and filename.startswith('.'):
return filename
return extension
def ShouldScrubFile(self, file_obj):
if (file_obj.IsBinaryFile() or
self.config.do_not_scrub_files_re.search(file_obj.relative_filename)):
return False
return True
def ScrubbersForFile(self, file_obj):
"""Return a seq of base.FileScrubber's appropriate for file_obj."""
extension = self._GetExtension(file_obj.relative_filename)
scrubbers = self.config.extension_to_scrubber_map.get(extension, None)
if scrubbers is not None:
return scrubbers
if os.path.basename(file_obj.filename) not in self.config.known_filenames:
self._unscrubbed_file_extensions.add(extension)
return self.config.default_scrubbers
def _RunPreBatchScrubbers(self, file_objs):
self._RunBatchScrubbers(self.config.extension_to_pre_batch_scrubbers_map,
file_objs)
def _RunPostBatchScrubbers(self, file_objs):
self._RunBatchScrubbers(self.config.extension_to_post_batch_scrubbers_map,
file_objs)
def _RunBatchScrubbers(self, batch_scrubbers_map, file_objs):
files_by_extension = {}
for file_obj in file_objs:
ext = self._GetExtension(file_obj.relative_filename)
files_by_extension[ext] = files_by_extension.get(ext, []) + [file_obj]
for (ext, batch_scrubbers) in batch_scrubbers_map.iteritems():
for batch_scrubber in batch_scrubbers:
if ext in files_by_extension:
batch_scrubber.BatchScrubFiles(files_by_extension[ext], self)
def Scan(self):
files_to_scrub = [file_obj for file_obj in self.files if
self.ShouldScrubFile(file_obj)]
sys.stdout.write('Running initial batch scrubbers...\n')
sys.stdout.flush()
self._RunPreBatchScrubbers(files_to_scrub)
for file_obj in files_to_scrub:
scrubbers = self.ScrubbersForFile(file_obj)
for scrubber in scrubbers:
if file_obj.is_deleted:
# No need to further scrub a deleted file
break
scrubber.ScrubFile(file_obj, self)
sys.stdout.write('.')
sys.stdout.flush()
sys.stdout.write('\n')
sys.stdout.write('Running final batch scrubbers...\n')
sys.stdout.flush()
self._RunPostBatchScrubbers(files_to_scrub)
# Top-level scrubber config keys.
_SCRUBBER_CONFIG_KEYS = [
# General options
u'ignore_files_re',
u'do_not_scrub_files_re',
u'extension_map',
u'sensitive_string_file',
u'sensitive_words',
u'sensitive_res',
u'whitelist',
u'scrub_sensitive_comments',
u'rearranging_config',
u'string_replacements',
u'regex_replacements',
u'scrub_non_documentation_comments',
u'scrub_all_comments',
# User options
u'usernames_to_scrub',
u'usernames_to_publish',
u'usernames_file',
u'scrub_unknown_users',
u'scrub_authors',
# C/C++ options
u'c_includes_config_file',
# Java options
u'empty_java_file_action',
u'maximum_blank_lines',
u'scrub_java_testsize_annotations',
u'java_renames',
# Javascript options
# Note: js_directory_rename is deprecated in favor of js_directory_renames,
# which supports multiple rename requests.
# TODO(user): Remove the old one after all config files have been changed.
u'js_directory_rename',
u'js_directory_renames',
# Python options
u'python_module_renames',
u'python_module_removes',
u'python_shebang_replace',
# GWT options
u'scrub_gwt_inherits',
# proto options
u'scrub_proto_comments',
]
def ScrubberConfigFromJson(codebase,
input_files,
config_json,
extension_to_scrubber_map=None,
default_scrubbers=None,
modify=False,
output_tar='',
temp_dir='',
**unused_kwargs):
"""Generate a ScrubberConfig object from a ScrubberConfig JSON object."""
def SetOption(key, func=None):
"""Set an option in the config from JSON, using the enclosing scope.
Args:
key: unicode; the key in the JSON config and corresponding config
attribute name.
func: An optional transformation to apply to the JSON value before storing
in the config.
"""
if key in config_json:
value = config_json[key]
if func is not None:
value = func(value)
setattr(config, str(key), value)
config_utils.CheckJsonKeys('scrubber config', config_json,
_SCRUBBER_CONFIG_KEYS)
config = ScrubberConfig(codebase, input_files, extension_to_scrubber_map,
default_scrubbers, modify, output_tar, temp_dir)
# General options.
SetOption(u'ignore_files_re', func=re.compile)
SetOption(u'do_not_scrub_files_re', func=re.compile)
SetOption(u'sensitive_words')
config.sensitive_words = config_json.get(u'sensitive_words', [])
SetOption(u'extension_map', func=lambda m: [(re.compile(r), e) for r, e in m])
SetOption(u'sensitive_res')
sensitive_string_file = config_json.get(u'sensitive_string_file')
if sensitive_string_file:
sensitive_string_json = config_utils.ReadConfigFile(sensitive_string_file)
config_utils.CheckJsonKeys('sensitive string config', sensitive_string_json,
[u'sensitive_words', u'sensitive_res'])
config.sensitive_words.extend(
sensitive_string_json.get(u'sensitive_words', []))
config.sensitive_res.extend(sensitive_string_json.get(u'sensitive_res', []))
whitelist_entries = []
for entry in config_json.get(u'whitelist', []):
config_utils.CheckJsonKeys('whitelist entry', entry,
[u'filter', u'trigger', u'filename'])
whitelist_entries.append((entry.get(u'filter', ''),
entry.get(u'trigger', ''),
entry.get(u'filename', '')))
config.whitelist = whitelist.Whitelist(whitelist_entries)
SetOption(u'scrub_sensitive_comments')
SetOption(u'rearranging_config')
SetOption(u'string_replacements')
SetOption(u'regex_replacements')
SetOption(u'scrub_non_documentation_comments')
SetOption(u'scrub_all_comments')
# User options.
# TODO(dborowitz): Make the scrubbers pass unicode to the UsernameFilter.
# TODO(dborowitz): Make these names consistent so we can use SetOption.
strs = lambda us: [str(u) for u in us]
if u'usernames_to_publish' in config_json:
config.publishable_usernames = strs(config_json[u'usernames_to_publish'])
if u'usernames_to_scrub' in config_json:
config.scrubbable_usernames = strs(config_json[u'usernames_to_scrub'])
SetOption(u'usernames_file')
SetOption(u'scrub_unknown_users')
SetOption(u'scrub_authors')
SetOption(u'scrub_proto_comments')
# C/C++-specific options.
SetOption(u'c_includes_config_file')
# Java-specific options.
action_map = {
'IGNORE': base.ACTION_IGNORE,
'DELETE': base.ACTION_DELETE,
'ERROR': base.ACTION_ERROR,
}
SetOption(u'empty_java_file_action', func=lambda a: action_map[a])
SetOption(u'maximum_blank_lines')
SetOption(u'scrub_java_testsize_annotations')
config.java_renames = []
for rename in config_json.get(u'java_renames', []):
config_utils.CheckJsonKeys(
'java rename', rename,
[u'internal_package', u'public_package'])
config.java_renames.append(java_scrubber.JavaRenameScrubber(
rename[u'internal_package'], rename[u'public_package']))
# Javascript-specific options.
# TODO(user): Remove js_directory_rename after all config files have been
# migrated to use js_directory_renames.
js_directory_rename = config_json.get(u'js_directory_rename')
if js_directory_rename is not None:
config_utils.CheckJsonKeys('JS directory rename', js_directory_rename,
[u'internal_directory', u'public_directory'])
config.js_directory_renames.append(line_scrubber.JsDirectoryRename(
js_directory_rename[u'internal_directory'],
js_directory_rename[u'public_directory']))
js_directory_renames = config_json.get(u'js_directory_renames', [])
for js_directory_rename in js_directory_renames:
config_utils.CheckJsonKeys('JS directory rename', js_directory_rename,
[u'internal_directory', u'public_directory'])
config.js_directory_renames.append(line_scrubber.JsDirectoryRename(
js_directory_rename[u'internal_directory'],
js_directory_rename[u'public_directory']))
# Python-specific options.
config.python_module_renames = []
for rename in config_json.get(u'python_module_renames', []):
config_utils.CheckJsonKeys(
'python module rename', rename,
[u'internal_module', u'public_module', u'as_name'])
config.python_module_renames.append(python_scrubber.PythonModuleRename(
rename[u'internal_module'], rename[u'public_module'],
as_name=rename.get(u'as_name')))
# TODO(dborowitz): Find out why these are singleton protobufs; possibly
# flatten them.
config.python_module_removes = []
for remove in config_json.get(u'python_module_removes', []):
config_utils.CheckJsonKeys('python module removal', remove,
[u'import_module'])
config.python_module_removes.append(
python_scrubber.PythonModuleRemove(remove[u'import_module']))
python_shebang_replace = config_json.get(u'python_shebang_replace')
if python_shebang_replace is not None:
config_utils.CheckJsonKeys('python shebang replacement',
python_shebang_replace, [u'shebang_line'])
config.python_shebang_replace = python_scrubber.PythonShebangReplace(
python_shebang_replace[u'shebang_line'])
# GWT-specific options.
SetOption(u'scrub_gwt_inherits')
config.ResetScrubbers(extension_to_scrubber_map, default_scrubbers)
return config
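# Illustrative shape of a config JSON accepted by ScrubberConfigFromJson (keys
# come from _SCRUBBER_CONFIG_KEYS above; the values are made-up examples):
# {
#   "ignore_files_re": "\\.git/",
#   "sensitive_words": ["secret_project"],
#   "whitelist": [{"filter": "SENSITIVE_WORD", "trigger": "secret_project",
#                  "filename": "docs/allowed.txt"}],
#   "scrub_authors": true,
#   "empty_java_file_action": "IGNORE"
# }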
class ScannedFile(object):
"""A ScannedFile is a file to be scrubbed.
Instance members:
filename: str, the full path to the file to be scanned
relative_filename: str, the filename relative to the codebase
output_relative_filename: str, the relative filename this file should have
in the output codebase. This allows us to
rearrange codebases during scrubbing.
is_modified: bool, whether this file has been modified during scrubbing
_contents: str, the file's current contents
_in_unicode: True if the file's contents is unicode text, False if it's
a binary file
_temp_dir: str, a temporary directory to use
is_deleted: bool, if the file has been deleted during scrubbing
"""
def __init__(self, filename, relative_filename, temp_dir,
output_relative_filename):
self.filename = filename
self.relative_filename = relative_filename
self.output_relative_filename = output_relative_filename
self.is_modified = False
self._contents = None
self._in_unicode = None
self._temp_dir = temp_dir
self.is_deleted = False
def _ReadContents(self, filename):
"""Read the contents of filename.
Args:
filename: str, the string to read the contents of
Returns:
(contents (as unicode or str), bool (whether the contents are unicode))
NB(dbentley): Here's as good a place as any to discuss scrubber's
handling of unicode.
The scrubber handles two kinds of files: those in UTF-8, and those not.
For those not in UTF-8, we believe that they're binary. This is
sufficient for our interests, because all our source files are in UTF-8.
We determine this by trying to read a file as UTF-8, and if it works we
keep it as UTF-8. Otherwise, we consider it binary.
We then have the contents as unicodes (not strs). We have to be careful
that we don't handle them as strings. Luckily, if we ever do handle them
as strings, they will not be able to encode to ascii and we will get an
exception. I.e., a rather loud | |
# File: test_network.py
import tensorflow as tf
from utils import weights_spectral_norm
class STDFusionNet():
def feature_padding(self, x, kernel=3, stride=1, pad=1):
if (kernel - stride) % 2 == 0:
pad_top = pad
pad_bottom = pad
pad_left = pad
pad_right = pad
else:
pad_top = pad
pad_bottom = kernel - stride - pad_top
pad_left = pad
pad_right = kernel - stride - pad_left
x = tf.pad(x, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]])
return x
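    # Padding arithmetic used above (illustrative): kernel=3, stride=1, pad=1
    # gives a symmetric pad of (1, 1, 1, 1); kernel=5, stride=1, pad=2 gives
    # (2, 2, 2, 2); for an even kernel such as kernel=4, stride=1, pad=1 the
    # remainder goes to the bottom/right: pad_top/left = 1, pad_bottom/right =
    # 4 - 1 - 1 = 2. Together with the 'VALID' convolutions below this keeps
    # the spatial size unchanged for stride 1.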
def vi_feature_extraction_network(self, vi_image, reader):
with tf.compat.v1.variable_scope('vi_extraction_network'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/conv1/b')))
input = self.feature_padding(vi_image, kernel=5, stride=1, pad=2)
conv1 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
# conv1 = tf.contrib.layers.batch_norm(conv1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
conv1 = tf.nn.leaky_relu(conv1)
block1_input = conv1
print("block1_input shape: ", block1_input.get_shape().as_list())
# state size: 16
with tf.compat.v1.variable_scope('block1'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block1/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block1/conv1/b')))
conv1 = tf.nn.conv2d(block1_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block1/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block1/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block1/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block1/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
print("conv3 shape: ", conv3.get_shape().as_list())
block1_output = tf.nn.leaky_relu(conv3 + block1_input)
block2_input = block1_output
with tf.compat.v1.variable_scope('block2'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block2/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block2/conv1/b')))
conv1 = tf.nn.conv2d(block2_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block2/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block2/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block2/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block2/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
                with tf.compat.v1.variable_scope('identity_conv'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block2/identity_conv/w')))
#weights = weights_spectral_norm(weights)
identity_conv = tf.nn.conv2d(block2_input, weights, strides=[1, 1, 1, 1], padding='VALID')
block2_output = tf.nn.leaky_relu(conv3 + identity_conv)
block3_input = block2_output
with tf.compat.v1.variable_scope('block3'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block3/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block3/conv1/b')))
conv1 = tf.nn.conv2d(block3_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block3/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block3/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block3/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/vi_extraction_network/block3/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
                with tf.compat.v1.variable_scope('identity_conv'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/vi_extraction_network/block3/identity_conv/w')))
#weights = weights_spectral_norm(weights)
identity_conv = tf.nn.conv2d(block3_input, weights, strides=[1, 1, 1, 1], padding='VALID')
block3_output = tf.nn.leaky_relu(conv3 + identity_conv)
encoding_feature = block3_output
return encoding_feature
def ir_feature_extraction_network(self, ir_image, reader):
with tf.compat.v1.variable_scope('ir_extraction_network'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/conv1/b')))
input = self.feature_padding(ir_image, kernel=5, stride=1, pad=2)
conv1 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
# conv1 = tf.contrib.layers.batch_norm(conv1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True)
conv1 = tf.nn.leaky_relu(conv1)
block1_input = conv1
# state size: 16
with tf.compat.v1.variable_scope('block1'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block1/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block1/conv1/b')))
conv1 = tf.nn.conv2d(block1_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block1/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block1/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block1/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block1/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
block1_output = tf.nn.leaky_relu(conv3 + block1_input)
block2_input = block1_output
with tf.compat.v1.variable_scope('block2'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block2/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block2/conv1/b')))
conv1 = tf.nn.conv2d(block2_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block2/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block2/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block2/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block2/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
                with tf.compat.v1.variable_scope('identity_conv'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block2/identity_conv/w')))
#weights = weights_spectral_norm(weights)
identity_conv = tf.nn.conv2d(block2_input, weights, strides=[1, 1, 1, 1], padding='VALID')
block2_output = tf.nn.leaky_relu(conv3 + identity_conv)
block3_input = block2_output
with tf.compat.v1.variable_scope('block3'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block3/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block3/conv1/b')))
conv1 = tf.nn.conv2d(block3_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block3/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block3/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block3/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/ir_extraction_network/block3/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
                with tf.compat.v1.variable_scope('identity_conv'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/ir_extraction_network/block3/identity_conv/w')))
#weights = weights_spectral_norm(weights)
identity_conv = tf.nn.conv2d(block3_input, weights, strides=[1, 1, 1, 1], padding='VALID')
block3_output = tf.nn.leaky_relu(conv3 + identity_conv)
encoding_feature = block3_output
return encoding_feature
def feature_reconstruction_network(self, feature, reader):
with tf.compat.v1.variable_scope('reconstruction_network'):
block1_input = feature
with tf.compat.v1.variable_scope('block1'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block1/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block1/conv1/b')))
conv1 = tf.nn.conv2d(block1_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block1/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block1/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block1/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block1/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
                with tf.compat.v1.variable_scope('identity_conv'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block1/identity_conv/w')))
#weights = weights_spectral_norm(weights)
identity_conv = tf.nn.conv2d(block1_input, weights, strides=[1, 1, 1, 1], padding='VALID')
block1_output = tf.nn.elu(conv3 + identity_conv)
block2_input = block1_output
with tf.compat.v1.variable_scope('block2'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block2/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block2/conv1/b')))
conv1 = tf.nn.conv2d(block2_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block2/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block2/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block2/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block2/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
                with tf.compat.v1.variable_scope('identity_conv'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block2/identity_conv/w')))
#weights = weights_spectral_norm(weights)
identity_conv = tf.nn.conv2d(block2_input, weights, strides=[1, 1, 1, 1], padding='VALID')
block2_output = tf.nn.elu(conv3 + identity_conv)
block3_input = block2_output
with tf.compat.v1.variable_scope('block3'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block3/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block3/conv1/b')))
conv1 = tf.nn.conv2d(block3_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block3/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block3/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block3/conv3/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block3/conv3/b')))
conv3 = tf.nn.conv2d(conv2, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
                with tf.compat.v1.variable_scope('identity_conv'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block3/identity_conv/w')))
#weights = weights_spectral_norm(weights)
identity_conv = tf.nn.conv2d(block3_input, weights, strides=[1, 1, 1, 1], padding='VALID')
block3_output = tf.nn.leaky_relu(conv3 + identity_conv)
block4_input = block3_output
with tf.compat.v1.variable_scope('block4'):
with tf.compat.v1.variable_scope('conv1'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block4/conv1/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block4/conv1/b')))
conv1 = tf.nn.conv2d(block4_input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv1 = tf.nn.leaky_relu(conv1)
with tf.compat.v1.variable_scope('conv2'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block4/conv2/w')))
#weights = weights_spectral_norm(weights)
bias = tf.compat.v1.get_variable("b", initializer=tf.constant(reader.get_tensor(
'STMFusion_model/reconstruction_network/block4/conv2/b')))
input = self.feature_padding(conv1)
conv2 = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding='VALID') + bias
conv2 = tf.nn.leaky_relu(conv2)
with tf.compat.v1.variable_scope('conv3'):
weights = tf.compat.v1.get_variable("w", initializer=tf.constant(
reader.get_tensor('STMFusion_model/reconstruction_network/block4/conv3/w')))
#weights = weights_spectral_norm(weights)
one of {task_keys} "
f'for {cls.__name__}: "{k}" is not allowed.\n{mes}'
)
@classmethod
def print_config(cls, file=sys.stdout) -> None:
assert check_argument_types()
# Shows the config: e.g. python train.py asr --print_config
config = cls.get_default_config()
file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False))
@classmethod
def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
assert check_argument_types()
print(get_commandline_args(), file=sys.stderr)
if args is None:
parser = cls.get_parser()
args = parser.parse_args(cmd)
args.version = __version__
if args.pretrain_path is not None:
raise RuntimeError("--pretrain_path is deprecated. Use --init_param")
if args.print_config:
cls.print_config()
sys.exit(0)
cls.check_required_command_args(args)
# "distributed" is decided using the other command args
resolve_distributed_mode(args)
if not args.distributed or not args.multiprocessing_distributed:
cls.main_worker(args)
else:
assert args.ngpu > 1, args.ngpu
# Multi-processing distributed mode: e.g. 2node-4process-4GPU
# | Host1 | Host2 |
# | Process1 | Process2 | <= Spawn processes
# |Child1|Child2|Child1|Child2|
# |GPU1 |GPU2 |GPU1 |GPU2 |
# See also the following usage of --multiprocessing-distributed:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)
if num_nodes == 1:
args.dist_master_addr = "localhost"
args.dist_rank = 0
# Single node distributed training with multi-GPUs
if (
args.dist_init_method == "env://"
and get_master_port(args.dist_master_port) is None
):
# Get the unused port
args.dist_master_port = free_port()
            # Assume that all nodes use the same number of GPUs
args.dist_world_size = args.ngpu * num_nodes
node_rank = get_node_rank(args.dist_rank, args.dist_launcher)
# The following block is copied from:
# https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py
error_queues = []
processes = []
mp = torch.multiprocessing.get_context("spawn")
for i in range(args.ngpu):
# Copy args
local_args = argparse.Namespace(**vars(args))
local_args.local_rank = i
local_args.dist_rank = args.ngpu * node_rank + i
local_args.ngpu = 1
process = mp.Process(
target=cls.main_worker,
args=(local_args,),
daemon=False,
)
process.start()
processes.append(process)
error_queues.append(mp.SimpleQueue())
# Loop on join until it returns True or raises an exception.
while not ProcessContext(processes, error_queues).join():
pass
@classmethod
def main_worker(cls, args: argparse.Namespace):
assert check_argument_types()
# 0. Init distributed process
distributed_option = build_dataclass(DistributedOption, args)
# Setting distributed_option.dist_rank, etc.
distributed_option.init_options()
# NOTE(kamo): Don't use logging before invoking logging.basicConfig()
if not distributed_option.distributed or distributed_option.dist_rank == 0:
if not distributed_option.distributed:
_rank = ""
else:
_rank = (
f":{distributed_option.dist_rank}/"
f"{distributed_option.dist_world_size}"
)
# NOTE(kamo):
# logging.basicConfig() is invoked in main_worker() instead of main()
# because it can be invoked only once in a process.
# FIXME(kamo): Should we use logging.getLogger()?
logging.basicConfig(
level=args.log_level,
format=f"[{os.uname()[1].split('.')[0]}{_rank}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
# Suppress logging if RANK != 0
logging.basicConfig(
level="ERROR",
format=f"[{os.uname()[1].split('.')[0]}"
f":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
# Invoking torch.distributed.init_process_group
distributed_option.init_torch_distributed()
# 1. Set random-seed
set_all_random_seed(args.seed)
torch.backends.cudnn.enabled = args.cudnn_enabled
torch.backends.cudnn.benchmark = args.cudnn_benchmark
torch.backends.cudnn.deterministic = args.cudnn_deterministic
if args.detect_anomaly:
logging.info("Invoking torch.autograd.set_detect_anomaly(True)")
torch.autograd.set_detect_anomaly(args.detect_anomaly)
# 2. Build model
model = cls.build_model(args=args)
if not isinstance(model, AbsESPnetModel):
raise RuntimeError(
f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}"
)
model = model.to(
dtype=getattr(torch, args.train_dtype),
device="cuda" if args.ngpu > 0 else "cpu",
)
for t in args.freeze_param:
for k, p in model.named_parameters():
if k.startswith(t + ".") or k == t:
logging.info(f"Setting {k}.requires_grad = False")
p.requires_grad = False
# 3. Build optimizer
optimizers = cls.build_optimizers(args, model=model)
# 4. Build schedulers
schedulers = []
for i, optim in enumerate(optimizers, 1):
suf = "" if i == 1 else str(i)
name = getattr(args, f"scheduler{suf}")
conf = getattr(args, f"scheduler{suf}_conf")
if name is not None:
cls_ = scheduler_classes.get(name)
if cls_ is None:
raise ValueError(
f"must be one of {list(scheduler_classes)}: {name}"
)
scheduler = cls_(optim, **conf)
else:
scheduler = None
schedulers.append(scheduler)
logging.info(pytorch_cudnn_version())
logging.info(model_summary(model))
for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):
suf = "" if i == 1 else str(i)
logging.info(f"Optimizer{suf}:\n{o}")
logging.info(f"Scheduler{suf}: {s}")
# 5. Dump "args" to config.yaml
        # NOTE(kamo): "args" should be saved after the objects are built,
        # because building them is allowed to modify "args".
output_dir = Path(args.output_dir)
if not distributed_option.distributed or distributed_option.dist_rank == 0:
output_dir.mkdir(parents=True, exist_ok=True)
with (output_dir / "config.yaml").open("w", encoding="utf-8") as f:
logging.info(
f'Saving the configuration in {output_dir / "config.yaml"}'
)
yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
if args.dry_run:
pass
elif args.collect_stats:
            # Run in collect_stats mode. This mode has two roles:
# - Derive the length and dimension of all input data
# - Accumulate feats, square values, and the length for whitening
logging.info(args)
if args.valid_batch_size is None:
args.valid_batch_size = args.batch_size
if len(args.train_shape_file) != 0:
train_key_file = args.train_shape_file[0]
else:
train_key_file = None
if len(args.valid_shape_file) != 0:
valid_key_file = args.valid_shape_file[0]
else:
valid_key_file = None
collect_stats(
model=model,
train_iter=cls.build_streaming_iterator(
data_path_and_name_and_type=args.train_data_path_and_name_and_type,
key_file=train_key_file,
batch_size=args.batch_size,
dtype=args.train_dtype,
num_workers=args.num_workers,
allow_variable_data_keys=args.allow_variable_data_keys,
ngpu=args.ngpu,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args, train=False),
),
valid_iter=cls.build_streaming_iterator(
data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
key_file=valid_key_file,
batch_size=args.valid_batch_size,
dtype=args.train_dtype,
num_workers=args.num_workers,
allow_variable_data_keys=args.allow_variable_data_keys,
ngpu=args.ngpu,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args, train=False),
),
output_dir=output_dir,
ngpu=args.ngpu,
log_interval=args.log_interval,
write_collected_feats=args.write_collected_feats,
)
else:
# 6. Loads pre-trained model
for p in args.init_param:
logging.info(f"Loading pretrained params from {p}")
load_pretrained_model(
model=model,
init_param=p,
ignore_init_mismatch=args.ignore_init_mismatch,
# NOTE(kamo): "cuda" for torch.load always indicates cuda:0
# in PyTorch<=1.4
map_location=f"cuda:{torch.cuda.current_device()}"
if args.ngpu > 0
else "cpu",
)
# 7. Build iterator factories
if args.multiple_iterator:
train_iter_factory = cls.build_multiple_iter_factory(
args=args,
distributed_option=distributed_option,
mode="train",
)
else:
train_iter_factory = cls.build_iter_factory(
args=args,
distributed_option=distributed_option,
mode="train",
)
valid_iter_factory = cls.build_iter_factory(
args=args,
distributed_option=distributed_option,
mode="valid",
)
if args.num_att_plot != 0:
plot_attention_iter_factory = cls.build_iter_factory(
args=args,
distributed_option=distributed_option,
mode="plot_att",
)
else:
plot_attention_iter_factory = None
# 8. Start training
if args.use_wandb:
if wandb is None:
raise RuntimeError("Please install wandb")
try:
wandb.login()
except wandb.errors.UsageError:
logging.info("wandb not configured! run `wandb login` to enable")
args.use_wandb = False
if args.use_wandb:
if (
not distributed_option.distributed
or distributed_option.dist_rank == 0
):
if args.wandb_project is None:
project = "ESPnet_" + cls.__name__
else:
project = args.wandb_project
if args.wandb_name is None:
name = str(Path(".").resolve()).replace("/", "_")
else:
name = args.wandb_name
wandb.init(
entity=args.wandb_entity,
project=project,
name=name,
dir=output_dir,
id=args.wandb_id,
resume="allow",
)
wandb.config.update(args)
else:
# wandb also supports grouping for distributed training,
                    # but we only log aggregated data,
                    # so it is enough to run it on the rank-0 node.
args.use_wandb = False
# Don't give args to trainer.run() directly!!!
            # Instead, define an "Options" object and build it here.
trainer_options = cls.trainer.build_options(args)
cls.trainer.run(
model=model,
optimizers=optimizers,
schedulers=schedulers,
train_iter_factory=train_iter_factory,
valid_iter_factory=valid_iter_factory,
plot_attention_iter_factory=plot_attention_iter_factory,
trainer_options=trainer_options,
distributed_option=distributed_option,
)
        if wandb is not None and wandb.run:
wandb.finish()
@classmethod
def build_iter_options(
cls,
args: argparse.Namespace,
distributed_option: DistributedOption,
mode: str,
):
if mode == "train":
preprocess_fn = cls.build_preprocess_fn(args, train=True)
collate_fn = cls.build_collate_fn(args, train=True)
data_path_and_name_and_type = args.train_data_path_and_name_and_type
shape_files = args.train_shape_file
batch_size = args.batch_size
batch_bins = args.batch_bins
batch_type = args.batch_type
max_cache_size = args.max_cache_size
max_cache_fd = args.max_cache_fd
distributed = distributed_option.distributed
num_batches = None
num_iters_per_epoch = args.num_iters_per_epoch
train = True
elif mode == "valid":
preprocess_fn = cls.build_preprocess_fn(args, train=False)
collate_fn = cls.build_collate_fn(args, train=False)
data_path_and_name_and_type = args.valid_data_path_and_name_and_type
shape_files = args.valid_shape_file
if args.valid_batch_type is None:
batch_type = args.batch_type
else:
batch_type = args.valid_batch_type
if args.valid_batch_size is None:
batch_size = args.batch_size
else:
batch_size = args.valid_batch_size
if args.valid_batch_bins is None:
batch_bins = args.batch_bins
else:
batch_bins = args.valid_batch_bins
if args.valid_max_cache_size is None:
# Cache 5% of maximum size for validation loader
max_cache_size = 0.05 * args.max_cache_size
else:
max_cache_size = args.valid_max_cache_size
max_cache_fd = args.max_cache_fd
distributed = distributed_option.distributed
num_batches = None
num_iters_per_epoch = None
train = False
elif mode == "plot_att":
preprocess_fn = cls.build_preprocess_fn(args, train=False)
collate_fn = cls.build_collate_fn(args, train=False)
data_path_and_name_and_type = args.valid_data_path_and_name_and_type
shape_files = args.valid_shape_file
batch_type = "unsorted"
batch_size = 1
batch_bins = 0
num_batches = args.num_att_plot
max_cache_fd = args.max_cache_fd
            # num_att_plot should be only a few samples (~3), so cache all the data.
max_cache_size = np.inf if args.max_cache_size != 0.0 else 0.0
            # always False because plot_attention runs only on RANK 0
distributed = False
num_iters_per_epoch = None
train = False
else:
raise NotImplementedError(f"mode={mode}")
return IteratorOptions(
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
data_path_and_name_and_type=data_path_and_name_and_type,
shape_files=shape_files,
batch_type=batch_type,
batch_size=batch_size,
batch_bins=batch_bins,
num_batches=num_batches,
max_cache_size=max_cache_size,
max_cache_fd=max_cache_fd,
distributed=distributed,
num_iters_per_epoch=num_iters_per_epoch,
train=train,
)
@classmethod
def build_iter_factory(
cls,
args: argparse.Namespace,
distributed_option: DistributedOption,
mode: str,
kwargs: dict = None,
) -> AbsIterFactory:
"""Build a factory object of mini-batch iterator.
        This object is invoked at every epoch to build the iterator for each epoch
        as follows:
        >>> iter_factory = cls.build_iter_factory(...)
        >>> for epoch in range(1, max_epoch):
        ...     for keys, batch in iter_factory.build_iter(epoch):
        ...         model(**batch)
        The mini-batches for each epoch are fully controlled by this class.
        Note that the random seed used for shuffling is decided as "seed + epoch" and
        the generated mini-batches can be reproduced when resuming.
        Note that an "epoch" does not necessarily correspond to one full pass
        over the whole training corpus.
        The "--num_iters_per_epoch" option restricts the number of iterations for
        each epoch, and the remaining samples of the original epoch are left unused.
        """
import numpy as np
from psychopy import core, visual, event
from utils import show_fixation_cross
def get_wedge(angular_position,
checker_phase=0.,
checker_cycles_radial=10,
checker_cycles_angular=2.5,
outer_radius=500,
inner_radius=80,
size=None,
angular_width=30,
soft=False,
              phase_is_relative_to_wedge=True):
    """Return a square array of shape (size, size), where size defaults to
    2 * outer_radius, containing a checkerboard wedge of the given angular
    width, checker frequencies and phase."""
if size is None:
size = 2 * outer_radius
half_size = size / 2
wedge_cartesian_coords = np.mgrid[-half_size:half_size,
-half_size:half_size]
radii = np.sqrt((wedge_cartesian_coords ** 2).sum(axis=0))
angles = np.arctan2(*wedge_cartesian_coords[::-1]) + np.pi
the_phase = checker_phase / 180. * np.pi
if phase_is_relative_to_wedge:
the_phase += angular_position / 180. * np.pi
raw_angular_checker = np.cos(
(angles - the_phase) *
360. / angular_width *
checker_cycles_angular)
raw_radial_checker = np.cos(radii / (outer_radius - inner_radius) *
2 * np.pi *
checker_cycles_radial)
radial_mask = (inner_radius <= radii) & (radii <= outer_radius)
angular_mask = np.abs(
(angles / (2 * np.pi) - (angular_position / 360.) + .5) % 1.
- .5) <= angular_width / 360. / 2.
image = raw_angular_checker * raw_radial_checker * \
radial_mask * angular_mask
if not soft:
return np.sign(image)
return image
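# A minimal usage sketch for get_wedge (the parameter values below are
# illustrative, not ones mandated by the protocol): build one hard-edged
# wedge frame and inspect it.
#
# wedge = get_wedge(angular_position=45.,
#                   inner_radius=40, outer_radius=250,
#                   angular_width=30)
# wedge.shape        # (500, 500): the default size is 2 * outer_radius
# np.unique(wedge)   # values in {-1, 0, 1}; 0 lies outside the wedge mask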
def get_ring(radial_position,
checker_phase=0.,
checker_cycles_radial=2,
checker_cycles_angular=30,
radial_width=50,
size=500,
soft=False,
phase_is_relative_to_ring=True):
half_size = size / 2
wedge_cartesian_coords = np.mgrid[-half_size:half_size,
-half_size:half_size]
radii = np.sqrt((wedge_cartesian_coords ** 2).sum(axis=0))
angles = np.arctan2(*wedge_cartesian_coords[::-1]) + np.pi
the_phase = checker_phase / 180. * np.pi
raw_angular_checker = np.cos(
angles * checker_cycles_angular)
if phase_is_relative_to_ring:
the_phase = (radial_position / radial_width *
2 * np.pi * checker_cycles_radial)
raw_radial_checker = np.cos(radii / radial_width *
2 * np.pi *
checker_cycles_radial - the_phase)
radial_mask = (radial_position <= radii) & (radii <=
radial_position +
radial_width)
image = raw_angular_checker * raw_radial_checker * radial_mask
if not soft:
return np.sign(image)
return image
# def retino_wedge_iterator(period_seconds=32, flicker_hertz=5,
# n_rounds=2,
# frame_rate_hertz=10,
# inner_radius=40, outer_radius=250,
# checker_radial=10,
# checker_angular=2.5,
# angular_width=30,
# reverse=False,
# preload=True):
# if reverse:
# r = -1
# else:
# r = 1
# n_degrees_total = 360. * n_rounds
# n_frames_total = n_rounds * period_seconds * frame_rate_hertz
# deg_per_frame = n_degrees_total / n_frames_total
# frame_degs = np.arange(n_frames_total) * deg_per_frame
# frame_times = np.arange(n_frames_total) / float(frame_rate_hertz)
# flicker_sign = (((frame_times * flicker_hertz) % 2) > 1) * 2. - 1
# if preload:
# all_frames = [
# (get_wedge(r * d,
# inner_radius=inner_radius,
# outer_radius=outer_radius,
# checker_cycles_radial=checker_radial,
# checker_cycles_angular=checker_angular,
# angular_width=angular_width), f, t)
# for d, t, f in zip(frame_degs, frame_times, flicker_sign)]
# # import IPython
# # IPython.embed()
# for image, f, t in all_frames:
# yield image * f, t
# return
# for d, t, f in zip(frame_degs, frame_times, flicker_sign):
# image = get_wedge(r * d,
# inner_radius=inner_radius,
# outer_radius=outer_radius,
# checker_cycles_radial=checker_radial,
# checker_cycles_angular=checker_angular,
# angular_width=angular_width)
# yield image * f, t
# def retino_ring_iterator(period_seconds=32, flicker_hertz=5,
# n_rounds=2,
# frame_rate_hertz=10,
# radial_width=30,
# size=500,
# checker_radial=2.5,
# checker_angular=30,
# reverse=False):
# r = -1 if reverse else 1
# r_step = 1. / (period_seconds * frame_rate_hertz)
# total_frames = n_rounds * period_seconds * frame_rate_hertz
# rs = (np.arange(total_frames) * r_step) % 1.
# frame_times = np.arange(total_frames) / float(frame_rate_hertz)
# flicker_sign = (((frame_times * flicker_hertz) % 2) > 1) * 2. - 1
# max_radius = size / 2 - radial_width
# # import IPython
# # IPython.embed()
# for r, t, f in zip(rs[::r], frame_times, flicker_sign):
# image = get_ring(r * max_radius,
# radial_width=radial_width,
# size=size,
# checker_cycles_radial=checker_radial,
# checker_cycles_angular=checker_angular)
# yield image * f, t
# class StimBuffer(object):
# """Takes a stimulus iterator and precaches a certain amount of images"""
# def __init__(self, stim_iterator, buffer_size=300):
# self.stim_iterator = stim_iterator
# self.buffer_size = buffer_size
# def init_buffer(self):
# self.stim_iterator_ = iter(self.stim_iterator)
# self.buffer_ = []
# self.times = []
# self.skipped = []
# for i in range(self.buffer_size):
# try:
# image, timing = next(self.stim_iterator_)
# self.buffer_.append(image)
# self.times.append(timing)
# except StopIteration:
# break
# def fetch(self, ):
# pass
# def show_retino_iterator(window, iterator):
# clock = core.Clock()
# true_timings = []
# active_index = -1
# stim_container = visual.ImageStim(window, size=(500, 500), units='pix',
# autoLog=False)
# for i, (image, timing) in enumerate(iterator):
# while True:
# if 'escape' in event.getKeys(['escape']):
# return
# t = clock.getTime()
# if (t >= timing) and (active_index < i):
# true_timings.append((t, timing))
# stim_container.setImage(image)
# stim_container.draw()
# show_fixation_cross(window)
# window.flip()
# active_index = i
# break
# return true_timings
def _preload_round(type="wedge", reverse=False,
duration_seconds=32.,
frame_rate_hertz=30.,
flicker_hertz=1.,
wedge_angular_width=30.,
ring_radial_width=30.,
size=500):
"""
    Precompute one round of frames for the given type of movement: wedge
    (clockwise or anti-clockwise) or ring (expanding or contracting).
    duration_seconds: time per round
    frame_rate_hertz: number of images per second
    flicker_hertz: how many times per second the black/white contrast is reversed
    wedge_angular_width: angular width of the wedge (degrees)
    ring_radial_width: width of the ring in pixels
    size: image size, i.e. number of pixels in the x and y directions
"""
func_args = dict(
checker_phase=0.,
size=size,
soft=False
)
n_frames = duration_seconds * frame_rate_hertz
frame_times = np.arange(n_frames) / frame_rate_hertz
if type == "wedge":
stimfunc = get_wedge
step = 360. / n_frames
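        # For example, with the function defaults duration_seconds=32. and
        # frame_rate_hertz=30.: n_frames = 960, so step = 360 / 960 = 0.375
        # degrees per frame, i.e. one full revolution per round.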
func_args["checker_cycles_radial"] = 10
func_args["checker_cycles_angular"] = 2.5
func_args["angular_width"] = wedge_angular_width
func_args["inner_radius"] = 5.
func_args["outer_radius"] = size / 2.
elif type == "ring":
stimfunc = get_ring
max_radius = float(size) / 2 - ring_radial_width
step = max_radius / n_frames
func_args["checker_cycles_radial"] = 2.5
func_args["checker_cycles_angular"] = 30
func_args["radial_width"] = ring_radial_width
else:
raise ValueError("type must be 'wedge' or 'ring'")
r = -1 if reverse else 1
steps = np.arange(n_frames) * step
flicker_sign = (((frame_times * flicker_hertz) % 2) > 1) * 2. - 1
frames = [stimfunc(d, **func_args) * f
for d, f in zip(steps[::r], flicker_sign)]
return frames
def preload_images(type="wedge", direction="clockwise",
duration_seconds=32.,
frame_rate_hertz=20.,
n_rounds=10,
flicker_hertz=5.,
wedge_angular_width=30,
ring_radial_width=30,
size=500,
):
"""
    Return the list of all frames that will be presented: one precomputed
    round repeated n_rounds times. The `direction` argument is forwarded to
    _preload_round as its `reverse` flag.
"""
one_round = list(_preload_round(type, direction,
duration_seconds=duration_seconds,
frame_rate_hertz=frame_rate_hertz,
flicker_hertz=flicker_hertz,
wedge_angular_width=wedge_angular_width,
ring_radial_width=ring_radial_width,
size=size))
all_rounds = one_round * n_rounds
return all_rounds
def present(window, stimulus_list, fixation_callback=None, frame_hertz=30.):
"""
    Present the stimulus.
    The window parameter is a PsychoPy window, and stimulus_list is the list
    of frames returned by preload_images().
"""
n_frames = len(stimulus_list)
clock = core.Clock()
stim_container = visual.ImageStim(window, size=(600, 600),
units="pix", autoLog=False)
shown_frame = -1
shown_frames = []
shown_times = []
while True:
if "escape" in event.getKeys(["escape"]):
break
t = clock.getTime()
current_frame = int(t * frame_hertz)
# print current_frame, t
if current_frame > n_frames - 1:
break
if current_frame > shown_frame:
stim_container.setImage(stimulus_list[current_frame])
stim_container.draw()
if fixation_callback is not None:
# import IPython
# IPython.embed()
fixation_callback(window, t)
window.flip()
shown_frame = current_frame
shown_frames.append(shown_frame)
shown_times.append(t)
return shown_times, shown_frames
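# Minimal end-to-end sketch of the preload/present pair, kept commented out so
# that importing this module does not open a window (the window flag, duration,
# frame rate and round count below are assumptions, not protocol values):
#
# win = visual.Window(fullscr=False)
# frames = preload_images("wedge", direction=False, n_rounds=1,
#                         duration_seconds=8., frame_rate_hertz=20., size=500)
# times, frame_indices = present(win, frames, frame_hertz=20.)
# win.close()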
def get_retino_fixation(run):
import pandas
from utils import colors as color_dict
from utils import get_fixation_cross_presenter
    df = pandas.read_csv("retino_fixation_%d.csv" % run)
times = df['times'].values
colors = df['colors'].values
count = dict([(c, 0) for c in np.unique(colors)])
# get majority color. Horrible code. Fast hack. pandas can do it ootb
for c in colors:
count[c] = count[c] + 1
pref_color = None
pref_presence = 0
for k, v in count.items():
if v > pref_presence:
pref_presence = v
pref_color = k
color_vals = [color_dict[c] for c in colors]
return get_fixation_cross_presenter(zip(color_vals, times)), pref_color
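# The majority-colour loop above is the "fast hack" its comment refers to; with
# pandas the preferred colour can be computed in one line (a sketch, assuming
# `colors` is the same array of colour names read from the CSV above):
#
# pref_color = pandas.Series(colors).value_counts().idxmax()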
color_button_correspondence = dict(
red='b', green='y', blue='g', yellow='r',
b='red', y='green', g='blue', r='yellow')
translation = dict(red='rouge', green='vert', blue='bleu', yellow='jaune')
if __name__ == "__main__":
# import matplotlib.pyplot as plt
# plt.figure()
# plt.subplot(2, 2, 1)
# plt.imshow(get_wedge(0.), interpolation='nearest')
# plt.gray()
# plt.subplot(2, 2, 2)
# plt.imshow(get_wedge(90.), interpolation='nearest')
# plt.subplot(2, 2, 3)
# plt.imshow(get_wedge(180.), interpolation='nearest')
# plt.subplot(2, 2, 4)
# plt.imshow(get_wedge(270.), interpolation='nearest')
# plt.show()
# import matplotlib.pyplot as plt
# plt.figure()
# plt.subplot(2, 2, 1)
# plt.imshow(get_ring(0.), interpolation='nearest')
# plt.gray()
# plt.subplot(2, 2, 2)
# plt.imshow(get_ring(50.), interpolation='nearest')
# plt.subplot(2, 2, 3)
# plt.imshow(get_ring(100.), interpolation='nearest')
# plt.subplot(2, 2, 4)
# plt.imshow(get_ring(200.), interpolation='nearest')
# plt.show()
# window = visual.Window(fullscr=False)
# it = retino_ring_iterator(reverse=False)
# true_timings = show_retino_iterator(window, it)
# window.close()
from utils import show_init_screen, quiz
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('number')
args = parser.parse_args()
number = int(args.number)
if number in [2, 4, 6, 8]:
reverse = True
elif number in [1, 3, 5, 7]:
reverse = False
else:
raise Exception("expect 1, 2, 3, 4, 5, 6, 7, 8 as argument")
if number in [1, 2, 5, 6]:
retino_type = "wedge"
elif number in [3, 4, 7, 8]:
retino_type = "ring"
fixation_callback, pref_color = get_retino_fixation(number)
window = visual.Window(fullscr=True)
show_init_screen(window, u"Chargement \n"
u"Lors de cette exp\u00E9rience, fixer le point central.\n"
u"Le point changera de couleur. D\u00E9terminer"
u" laquelle \n"
u"des couleurs est pr\u00E9sent\u00E9e le plus de "
u"temps.", wait_key=None)
# true_timings = show_retino_iterator(window, iterator)
images = preload_images(retino_type, reverse, size=600)
show_init_screen(window, u"Chargement TERMINE.\n"
u"Lors de cette exp\u00E9rience, fixer le point central.\n"
u"Le point changera de couleur. D\u00E9terminer"
u" laquelle \n"
u"des couleurs est pr\u00E9sent\u00E9e le plus de "
u"temps.", wait_key="space")
if show_init_screen(window, "", wait_key="t"):
        shown_times, shown_frames = present(window, images, fixation_callback=fixation_callback, frame_hertz=20.)
            if xlis is None or len(xlis) < 1:
return
detail: dict = {}
idx: int = 0
for xli in xlis:
try:
xli: etree._Element = xli
if xli is None:
continue
xdiv: etree._Element = xli.find('div/div/div/div/div[2]')
if xdiv is None:
continue
# url
xa: etree._Element = xdiv.find('div[1]/a')
if xa is None:
continue
schoolurl: str = xa.get('href')
if not schoolurl is None and schoolurl != "":
detail[f"学校{idx}url"] = schoolurl
# text
alltext = ";".join(xdiv.itertext())
if not alltext is None and alltext != "":
detail[f"学校{idx}"] = alltext
except Exception as ex:
self._logger.debug(
"Parse one edu_edu information failed: {} {} {}".
format(profile.nickname, profile.url, ex.args))
finally:
idx += 1
if len(detail) > 0:
profile.set_details(**detail)
except Exception:
self._logger.error(
"Parse edu_edu failed: username:{} url:{}\nerror: {}".format(
profile._networkid, profile.url, traceback.format_exc()))
###################################
# living
    def _get_living(self, profile: NetworkProfile):
        """Residence (address) information"""
try:
            # living
# https://www.facebook.com/profile.php?id=100030846743121&sk=about§ion=overview&lst=100013325533097%3A100030846743121%3A1568790537
url: str = "https://www.facebook.com/profile.php?id={}&sk=about§ion=living&lst={}%3A{}%3A{}".format(
profile._userid, self._userid, profile._userid,
helper_time.ts_since_1970(10))
html = self._ha.getstring(url,
headers="""
accept: */*
accept-encoding: gzip, deflate
accept-language: en-US,en;q=0.9
cache-control: no-cache
content-type: application/x-www-form-urlencoded
origin: https://www.facebook.com
pragma: no-cache
referer: {}
sec-fetch-mode: cors
sec-fetch-site: same-origin""".format(profile.url))
if html is None:
return
address = helper_str.substring(html, 'data-hovercard-prefer-more-content-show="1">', '<')
if address:
profile.address = address
except Exception:
            self._logger.error(
                "Get living page failed: username:{} url:{}".format(
profile._networkid, profile.url))
    # no longer works
    def __parse_living_home(self, profile: NetworkProfile, ul: str):
        """Parse places of residence"""
try:
hdoc: etree._Element = etree.XML(ul, etree.XMLParser())
if hdoc is None:
self._logger.error(
"Parse XML living_home failed: {} {}".format(
profile.nickname, profile.url))
return
xlis = hdoc.findall('li')
if xlis is None or len(xlis) < 1:
return
detail: dict = {}
idx: int = 0
for xli in xlis:
try:
xli: etree._Element = xli
if xli is None:
continue
xdiv: etree._Element = xli.find(
'div/div/div/div/div/div[2]')
if xdiv is None:
continue
# url
xa: etree._Element = xdiv.find('span/a')
if xa is None:
continue
href: str = xa.get('href')
if not href is None and href != "":
detail[f"地址{idx}url"] = href
# text
alltext = ";".join(xdiv.itertext())
if not alltext is None and alltext != "":
detail[f"地址{idx}"] = alltext
if profile.address is None:
profile.address = ""
profile.address += "{}\n".format(alltext)
except Exception as ex:
self._logger.debug(
"Parse one living_home information failed: {} {} {}".
format(profile.nickname, profile.url, ex.args))
finally:
idx += 1
if len(detail) > 0:
profile.set_details(**detail)
except Exception:
self._logger.error(
"Parse living_home failed: username:{} url:{}\nerror: {}".
format(profile._networkid, profile.url,
traceback.format_exc()))
###################################
    # contact-info: contact information and basic information
    def _get_addrinfo(self, profile: NetworkProfile):
        """Contact information"""
try:
            # contact-info
# https://www.facebook.com/profile.php?id=100030846743121&sk=about§ion=overview&lst=100013325533097%3A100030846743121%3A1568790537
url: str = "https://www.facebook.com/profile.php?id={}&sk=about§ion=contact-info&lst={}%3A{}%3A{}".format(
profile._userid, self._userid, profile._userid,
helper_time.ts_since_1970(10))
html = self._ha.getstring(url,
headers="""
accept: */*
accept-encoding: gzip, deflate
accept-language: en-US,en;q=0.9
cache-control: no-cache
content-type: application/x-www-form-urlencoded
origin: https://www.facebook.com
pragma: no-cache
referer: {}
sec-fetch-mode: cors
sec-fetch-site: same-origin""".format(profile.url))
if html is None:
return
soup = BeautifulSoup(html.replace('<!--', '').replace('-->', ''), 'lxml')
# photo
photo = soup.select_one('._11kf.img')
if photo:
try:
pic_url = photo.attrs['src'].replace('amp;', '')
pic = self._ha.get_response_stream(pic_url)
profile._profile_pic = helper_str.base64bytes(pic.read())
except:
pass
codes = soup.select('.hidden_elem code')
for code in codes:
str_code = str(code)
code = BeautifulSoup(str_code, 'lxml')
if str_code.__contains__('性别') or str_code.__contains__('出生日期'):
sex = re.findall(r'性别.*?class="_2iem">(.*?)</span>', str_code)
if sex:
profile.gender = sex[0]
birth = re.findall(r'出生日期.*?class="_2iem">(.*?)</span>', str_code)
if birth:
profile.birthday = birth[0]
elif str_code.__contains__('手机'):
try:
profile.set_phone(code.select_one('[class="_2iem"]').get_text('--*--').split('--*--')[0]
.replace('.', '').replace('-', '').replace(' ', ''))
except:
pass
elif str_code.__contains__('出生日期'):
profile.birthday = code.select_one('[class="_2iem"]').get_text()
except Exception:
self._logger.error(
"Get contact-info page failed: username:{} url:{}".format(
profile._networkid, profile.url))
    # no longer works
    def __parse_contact_info(self, profile: NetworkProfile, ul: str):
        """Parse contact information"""
try:
hdoc: etree._Element = etree.XML(ul, etree.XMLParser())
if hdoc is None:
self._logger.error(
"Parse XML contactinfo failed: {} {}".format(
profile.nickname, profile.url))
return
xlis = hdoc.findall('li')
if xlis is None or len(xlis) < 1:
return
detail: dict = {}
idx: int = 0
for xli in xlis:
try:
xli: etree._Element = xli
if xli is None:
continue
xdiv: etree._Element = xli.find('div')
if xdiv is None:
continue
# key
xkey: etree._Element = xdiv.find('div[1]')
if xkey is None:
continue
key: str = ";".join(xkey.itertext())
if key is None or key == "":
continue
# text
xval: etree._Element = xdiv.find('div[2]')
if xval is None:
continue
val: str = "".join(xval.itertext())
if val is None or val == "":
continue
detail[key] = val
if key.__contains__("Mobile Phones") or key.__contains__(
"电话"):
profile.set_phone(val)
if key.__contains__("Email") or key.__contains__("邮"):
profile.set_email(val)
except Exception as ex:
self._logger.debug(
"Parse one contactinfo information failed: {} {} {}".
format(profile.nickname, profile.url, ex.args))
finally:
idx += 1
if len(detail) > 0:
profile.set_details(**detail)
except Exception:
self._logger.error(
"Parse contactinfo failed: username:{} url:{}\nerror: {}".
format(profile._networkid, profile.url,
traceback.format_exc()))
    # no longer works
    def __parse_contact_websites(self, profile: NetworkProfile, ul: str):
        """Parse contact websites and social links"""
try:
hdoc: etree._Element = etree.XML(ul, etree.XMLParser())
if hdoc is None:
self._logger.error(
"Parse XML contact websites failed: {} {}".format(
profile.nickname, profile.url))
return
xlis = hdoc.findall('li')
if xlis is None or len(xlis) < 1:
return
detail: dict = {}
for xli in xlis:
try:
xli: etree._Element = xli
if xli is None:
continue
xdiv: etree._Element = xli.find('div')
if xdiv is None:
continue
# key
xkey: etree._Element = xdiv.find('div[1]')
if xkey is None:
continue
key: str = ";".join(xkey.itertext())
if key is None or key == "":
continue
xsites: etree._Element = None
if key == "Websites":
xsites = xdiv.findall('.//li')
elif key == "Social Links":
xsites = xdiv.findall('div[2]/div/div/span/ul/li')
else:
xsites = xdiv.findall('.//li')
if xsites is None or len(xsites) < 1:
continue
idx: int = 0
for xsite in xsites:
try:
xsite: etree._Element = xsite
val = "".join(xsite.itertext())
if val is None or val == "":
continue
detail[f"{key}{idx}"] = val
                        except Exception as ex:
self._logger.debug(
"Parse one contact websites information failed: {} {} {}"
.format(profile.nickname, profile.url,
ex.args))
finally:
idx += 1
except Exception as ex:
self._logger.debug(
"Parse one contact websites information failed: {} {} {}"
.format(profile.nickname, profile.url, ex.args))
if len(detail) > 0:
profile.set_details(**detail)
except Exception:
self._logger.error(
"Parse contact websites failed: username:{} url:{}\nerror: {}".
format(profile._networkid, profile.url,
traceback.format_exc()))
###################################
# relationship
    def _get_relation(self, profile: NetworkProfile):
        """Family relationships"""
try:
url: str = "https://www.facebook.com/profile.php?id={}&sk=about§ion=relationship&lst={}%3A{}%3A{}".format(
profile._userid, self._userid, profile._userid,
helper_time.ts_since_1970(10))
html = self._ha.getstring(url,
headers="""
accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
accept-encoding: gzip, deflate
accept-language: en-US,en;q=0.9
cache-control: no-cache
pragma: no-cache
sec-fetch-mode: navigate
sec-fetch-site: same-origin
sec-fetch-user: ?1
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36
viewport-width: 1600""")
if html is None:
return
m = self._re_info_relation.search(html)
if m is None:
self._logger.debug("No relationship info found: {} {}".format(
profile.nickname, profile.url))
return
strdiv: str = m.group(1).strip()
m = self._re_relation.search(strdiv)
if not m is None:
ul = m.group(1).strip()
if not ul is None and ul != "":
self.__parse_relations(profile, ul)
except Exception:
            self._logger.error(
                "Get relationship page failed: username:{} url:{}".format(
profile._networkid, profile.url))
    def __parse_relations(self, profile: NetworkProfile, ul: str):
        """Parse family relationships"""
try:
hdoc: etree._Element = etree.XML(ul, etree.XMLParser())
if hdoc is None:
                self._logger.error(
                    "Parse XML relations failed: {} {}".format(
profile.nickname, profile.url))
return
xlis = hdoc.findall('li')
if xlis is None or len(xlis) < 1:
return
detail: dict = {}
idx: int = 0
for xli in xlis:
try:
xli: etree._Element = xli
if xli is None:
continue
# text
text = ",".join(xli.itertext())
# url
xa = xli.find('div/div/div/div/div/div[2]/div/span/a')
if not xa is None:
url = xa.get('href')
if not url is None and url != "":
detail[f"关系{idx}url"] = url
detail[f"关系{idx}"] = text
except Exception as ex:
                    self._logger.debug(
                        "Parse one relation information failed: {} {} {}".
format(profile.nickname, profile.url, ex.args))
finally:
idx += 1
if len(detail) > 0:
profile.set_details(**detail)
except Exception:
            self._logger.error(
                "Parse relations failed: username:{} url:{}\nerror: {}".
format(profile._networkid, profile.url,
traceback.format_exc()))
###################################
# bio/detail
    def _get_bio(self, profile: NetworkProfile):
        """Bio / detail information"""
try:
            # bio
# https://www.facebook.com/profile.php?id=100030846743121&sk=about§ion=overview&lst=100013325533097%3A100030846743121%3A1568790537
url: str = "https://www.facebook.com/profile.php?id={}&sk=about§ion=bio&lst={}%3A{}%3A{}".format(
profile._userid, self._userid, profile._userid,
helper_time.ts_since_1970(10))
html = self._ha.getstring(url,
headers="""
accept: */*
accept-encoding: gzip, deflate
accept-language: en-US,en;q=0.9
cache-control: no-cache
content-type: application/x-www-form-urlencoded
origin: https://www.facebook.com
pragma: no-cache
referer: {}
sec-fetch-mode: cors
sec-fetch-site: same-origin""".format(profile.url))
if html is None:
return
m = self._re_bio_othernames.search(html)
if not m is None:
ul = m.group(1).strip()
self.__parse_bio_othernames(profile, ul)
m = self._re_bio_favorites.search(html)
if not m is None:
ul = m.group(1).strip()
self.__parse_bio_favorites(profile, ul)
m = self._re_bio_about.search(html)
if not m is None:
ul = m.group(1).strip()
self.__parse_bio_about(profile, ul)
except Exception:
self._logger.error(
"Get bio page failed: username:{} url:{}".format(
profile._networkid, profile.url))
    def __parse_bio_othernames(self, profile: NetworkProfile, ul: str):
        """Parse detail: other names"""
try:
hdoc: etree._Element = etree.XML(ul, etree.XMLParser())
if hdoc is None:
self._logger.error(
"Parse XML bio othernames failed: {} {}".format(
profile.nickname, profile.url))
return
xkey = hdoc.find('div/span')
if xkey is None:
return
key = ",".join(xkey.itertext())
if key is None or key == "":
return
xlis = hdoc.findall('ul/li')
if xlis is None or len(xlis) < 1:
return
detail: dict = {}
idx: int = 0
            for xli in xlis:
                try:
                    val = ",".join(xli.itertext())
                    if val:
                        detail[f"{key}{idx}"] = val
                except Exception as ex:
                    self._logger.debug(
                        "Parse one bio othername failed: {} {} {}".format(
                            profile.nickname, profile.url, ex.args))
                finally:
                    idx += 1
            if len(detail) > 0:
                profile.set_details(**detail)
        except Exception:
            self._logger.error(
                "Parse bio othernames failed: username:{} url:{}\nerror: {}".
                format(profile._networkid, profile.url,
                       traceback.format_exc()))
#!/usr/bin/python
import socket
import errno
import logging
import os
import platform
if platform.system() != 'Java':
from select import select
else:
from select import cpython_compatible_select as select
# Disable socks support in Jython
if platform.system() != 'Java':
import socks
else:
if 'socks_proxy' in os.environ:
        logging.warning('Unable to honour socks_proxy environment variable, unsupported in Jython')
from prober_utils import *
settings = {
# Note that changing these will invalidate many of the fingerprints
'default_hello_version': TLSRecord.TLS1_0,
'default_record_version': TLSRecord.TLS1_0,
'socket_timeout': 5
}
class Probe(object):
#
# Reusable standard elements
#
def __init__(self):
self.ipaddress = None
def connect(self, ipaddress, port, starttls_mode):
self.ipaddress = ipaddress
# Check if we're using socks
if 'socks_proxy' in os.environ:
socks_host, socks_port = os.environ['socks_proxy'].split(':')
s = socks.socksocket()
s.setproxy(socks.PROXY_TYPE_SOCKS5, socks_host, int(socks_port))
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(settings['socket_timeout'])
s.connect((ipaddress, port))
# Do starttls if relevant
starttls(s, port, starttls_mode)
# return s.makefile('rw', 0)
return s
def test(self, sock):
pass
def process_response(self, sock):
response = ''
got_done = False
while True:
# Check if there is anything following the server done
if got_done:
# If no data then we're done (the server hasn't sent anything further)
# we allow 500ms to give the followup time to arrive
if not select([sock.fileno(),],[],[],0.5)[0]:
break
try:
record = read_tls_record(sock)
response += '*(%x)' % record.version() # TODO: Not sure that recording the record layer version is worth it?
except socket.timeout as e:
response += 'error:timeout'
break
except socket.error as e:
# response += 'error:%s|' % errno.errorcode[e.errno]
response += 'error:%s' % e
break
except IOError as e:
response += 'error:%s|' % str(e)
break
if record.content_type() == TLSRecord.Handshake:
# A single handshake record can contain multiple handshake messages
processed_bytes = 0
while processed_bytes < record.message_length():
message = HandshakeMessage.from_bytes(record.message()[processed_bytes:])
if message.message_type() == message.ServerHello:
response += 'handshake:%s(%x)|' % (message.message_types[message.message_type()], message.server_version())
else:
response += 'handshake:%s|' % (message.message_types[message.message_type()])
if message.message_type() == HandshakeMessage.ServerHelloDone:
got_done = True
processed_bytes += message.message_length() + 4
if got_done:
continue
elif record.content_type() == TLSRecord.Alert:
alert = AlertMessage.from_bytes(record.message())
if alert.alert_level() == AlertMessage.Fatal:
response += 'alert:%s:fatal|' % alert.alert_types[alert.alert_type()]
break
else:
response += 'alert:%s:warning|' % alert.alert_types[alert.alert_type()]
else:
if record.content_type() in record.content_types:
response += 'record:%s|' % record.content_types[record.content_type()]
else:
response += 'record:type(%x)|' % record.content_type()
if got_done:
break
return response
def probe(self, ipaddress, port, starttls):
sock = self.connect(ipaddress, port, starttls)
try:
result = self.test(sock)
except socket.timeout as e:
result = 'writeerror:timeout'
try:
sock.shutdown(socket.SHUT_RD_WR)
except:
pass
sock.close()
return result
except socket.error as e:
result = 'writeerror:%s|' % errno.errorcode[e.errno]
try:
sock.shutdown(socket.SHUT_RD_WR)
except:
pass
sock.close()
return result
if result:
return result
result = self.process_response(sock)
try:
sock.shutdown(socket.SHUT_RD_WR)
except:
pass
sock.close()
return result
class NormalHandshake(Probe):
'''A normal handshake'''
def __init__(self):
super(NormalHandshake, self).__init__()
self.make_hello = make_hello
def test(self, sock):
logging.debug('Sending Client Hello...')
sock.send(self.make_hello())
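# Typical driver-side usage of a probe (a sketch: the host, port and starttls
# mode value are illustrative assumptions; the accepted starttls modes are
# whatever prober_utils.starttls() understands):
#
# probe = NormalHandshake()
# fingerprint = probe.probe('tls.example.com', 443, 'none')
# print(fingerprint)   # a string like '*(303)handshake:ServerHello(303)|...'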
class NormalHandshakePFS(NormalHandshake):
'''Normal handshake with PFS ciphersuites'''
def __init__(self):
super(NormalHandshakePFS, self).__init__()
self.make_hello = make_pfs_hello
class NormalHandshake11(NormalHandshake):
'''Normal TLSv1.1 handshake'''
def __init__(self):
super(NormalHandshake11, self).__init__()
self.make_hello = make_11_hello
class NormalHandshake11PFS(NormalHandshake):
'''Normal TLSv1.1 handshake'''
def __init__(self):
super(NormalHandshake11PFS, self).__init__()
self.make_hello = make_11_pfs_hello
class NormalHandshake12(NormalHandshake):
'''Normal TLSv1.2 handshake'''
def __init__(self):
super(NormalHandshake12, self).__init__()
self.make_hello = make_12_hello
class NormalHandshake12PFS(NormalHandshake):
'''Normal TLSv1.2 handshake with PFS ciphersuites'''
def __init__(self):
super(NormalHandshake12PFS, self).__init__()
self.make_hello = make_12_pfs_hello
class NormalHandshake12PFSw13(Probe):
'''TLSv1.2 with PFS ciphers with a TLSv1.3 version (invalid TLSv1.3)'''
def make_hello(self):
hello = ClientHelloMessage.create(TLSRecord.TLS1_3,
b'01234567890123456789012345678901',
DEFAULT_PFS_CIPHERS)
record = TLSRecord.create(content_type=TLSRecord.Handshake,
version=TLSRecord.TLS1_0,
message=hello.bytes)
#hexdump(record.bytes)
return record.bytes
def test(self, sock):
logging.debug('Sending Client Hello...')
sock.send(self.make_hello())
class ValidSessionID(Probe):
    '''Send a client hello with a valid (short) session ID'''
def __init__(self):
self.hello_version = TLSRecord.TLS1_2
self.ciphers = MAX_CIPHERS
def make_hello_payload(self, version, cipher_suites):
session_id = b'0123456789' # session ID is up to 32 bytes long
ciphers = struct.pack('>H{0}H'.format(len(cipher_suites)),
len(cipher_suites) * 2, *cipher_suites)
hello = (struct.pack('>H32sB',
version,
b'01234567890123456789012345678901',
len(session_id)) +
session_id + ciphers + b'\x01\x00' + b'\x00\x00')
return hello
def make_hello(self, version, cipher_suites):
hello = self.make_hello_payload(version, cipher_suites)
hello_msg = HandshakeMessage.create(HandshakeMessage.ClientHello,
hello)
record = TLSRecord.create(content_type=TLSRecord.Handshake,
version=TLSRecord.TLS1_2,
message=hello_msg.bytes)
return record.bytes
def generatePacket(self):
return self.make_hello(self.hello_version, self.ciphers)
def test(self, sock):
        logging.debug('Sending Client Hello...')
sock.send(self.make_hello(self.hello_version, self.ciphers))
class InvalidSessionID(Probe):
'''Send session ID that is too long'''
def __init__(self):
self.hello_version = TLSRecord.TLS1_0
self.ciphers = DEFAULT_CIPHERS
def make_hello_payload(self, version, cipher_suites):
session_id = b'0123456789' * 4 # session ID is up to 32 bytes long
ciphers = struct.pack('>H{0}H'.format(len(cipher_suites)),
len(cipher_suites) * 2, *cipher_suites)
hello = (struct.pack('>H32sB',
version,
b'01234567890123456789012345678901',
len(session_id)) +
session_id + ciphers + b'\x01\x00' + b'\x00\x00')
return hello
def make_hello(self, version, cipher_suites):
hello = self.make_hello_payload(version, cipher_suites)
hello_msg = HandshakeMessage.create(HandshakeMessage.ClientHello,
hello)
record = TLSRecord.create(content_type=TLSRecord.Handshake,
version=TLSRecord.TLS1_0,
message=hello_msg.bytes)
return record.bytes
def test(self, sock):
        logging.debug('Sending Client Hello...')
sock.send(self.make_hello(self.hello_version, self.ciphers))
class InvalidSessionID12(InvalidSessionID):
'''Send session ID that is too long in TLSv1.2 hello'''
def __init__(self):
super(InvalidSessionID12, self).__init__()
self.hello_version = TLSRecord.TLS1_2
self.ciphers = DEFAULT_12_CIPHERS
class InvalidSessionID12PFS(InvalidSessionID):
'''Send session ID that is too long in PFS TLSv1.2 hello'''
def __init__(self):
super(InvalidSessionID12PFS, self).__init__()
self.hello_version = TLSRecord.TLS1_2
self.ciphers = DEFAULT_PFS_CIPHERS
class InvalidCiphersLength(InvalidSessionID):
'''Send client hello with length field of ciphers that is invalid (odd)'''
def make_hello_payload(self, version, cipher_suites):
cipher_bytes = struct.pack('>{0}H'.format(len(cipher_suites)),
*cipher_suites) + b'\x00'
ciphers = struct.pack('>H', len(cipher_bytes)) + cipher_bytes
hello = (struct.pack('>H32sB', version,
b'01234567890123456789012345678901',
0) +
ciphers + b'\x01\x00' + b'\x00\x00')
return hello
class InvalidCiphersLength12(InvalidCiphersLength, InvalidSessionID12):
'''As with InvalidCiphersLength but with TLSv1.2 helo'''
pass
class InvalidCiphersLength12PFS(InvalidCiphersLength, InvalidSessionID12PFS):
'''As with InvalidCiphersLength but with PFS TLSv1.2 hello'''
pass
class InvalidExtLength(InvalidSessionID):
    '''Send client hello with the extensions length field truncated'''
def make_hello_payload(self, version, cipher_suites):
ciphers = struct.pack('>H{0}H'.format(len(cipher_suites)),
len(cipher_suites) * 2, *cipher_suites)
hello = (struct.pack('>H32sB',
version,
b'01234567890123456789012345678901',
0) +
ciphers + b'\x01\x00' + b'\x00')
return hello
class InvalidExtLength12(InvalidExtLength, InvalidSessionID12):
'''As with InvalidExtLength but in TLSv1.2 hello'''
pass
class InvalidExtLength12PFS(InvalidExtLength, InvalidSessionID12PFS):
'''As with InvalidExtLength but in PFS TLSv1.2 hello'''
pass
class ExtensionsUnderflow(InvalidSessionID):
'''Send hello with data length lower than stated size'''
def make_hello_payload(self, version, cipher_suites):
ciphers = struct.pack('>H{0}H'.format(len(cipher_suites)),
len(cipher_suites) * 2, *cipher_suites)
hello = (struct.pack('>H32sB',
version,
b'01234567890123456789012345678901',
0) +
ciphers + b'\x01\x00'
b'\x00\x01' # extensions length, just one byte
b'\xff\x01' # extension ID - secure renego indication
b'\x00\x01' # secure renego indication ext length
b'\x00') # valid payload for extension
return hello
class ExtensionsUnderflow12(ExtensionsUnderflow, InvalidSessionID12):
'''As in ExtensionsUnderflow but in TLSv1.2 hello'''
pass
class ExtensionsUnderflow12PFS(ExtensionsUnderflow, InvalidSessionID12PFS):
'''As in ExtensionsUnderflow but in PFS TLSv1.2 hello'''
pass
class EmptyCompression(InvalidSessionID):
'''Send hello with no compression methods'''
def make_hello_payload(self, version, cipher_suites):
ciphers = struct.pack('>H{0}H'.format(len(cipher_suites)),
len(cipher_suites) * 2, *cipher_suites)
hello = (struct.pack('>H32sB',
version,
b'01234567890123456789012345678901',
0) +
ciphers + b'\x00' + b'\x00\x00')
return hello
class EmptyCompression12(EmptyCompression, InvalidSessionID12):
'''As with EmptyCompression but in TLSv1.2 hello'''
pass
class EmptyCompression12PFS(EmptyCompression, InvalidSessionID12PFS):
'''As with EmptyCompression but in PFS TLSv1.2 hello'''
pass
class CompressOnly(InvalidSessionID):
'''Send hello with no support for uncompressed communication'''
def make_hello_payload(self, version, cipher_suites):
ciphers = struct.pack('>H{0}H'.format(len(cipher_suites)),
len(cipher_suites) * 2, *cipher_suites)
hello = (struct.pack('>H32sB',
version,
b'01234567890123456789012345678901',
0) +
ciphers + b'\x02\x01\x40' + b'\x00\x00')
return hello
class CompressOnly12(CompressOnly, InvalidSessionID12):
'''As with CompressOnly but in TLSv1.2 hello'''
pass
class CompressOnly12PFS(CompressOnly, InvalidSessionID12PFS):
'''As with CompressOnly but in PFS TLSv1.2 hello'''
pass
class DoubleClientHello(NormalHandshake):
'''Two client hellos'''
def test(self, sock):
logging.debug('Sending Client Hello...')
sock.send(self.make_hello())
logging.debug('Sending Client Hello...')
sock.send(self.make_hello())
class DoubleClientHello12(DoubleClientHello, NormalHandshake12):
'''Two client hellos, TLSv1.2'''
pass
class DoubleClientHello12PFS(DoubleClientHello, NormalHandshake12PFS):
'''Two client hellos, TLSv1.2 w/PFS ciphers'''
pass
class ChangeCipherSpec(NormalHandshake):
'''Send a hello then change cipher spec'''
def __init__(self):
super(ChangeCipherSpec, self).__init__()
self.make_ccs = make_ccs
self.record_version = TLSRecord.TLS1_0
def test(self, sock):
logging.debug('Sending Client Hello...')
sock.send(self.make_hello())
logging.debug('Sending ChangeCipherSpec...')
sock.send(self.make_ccs(self.record_version))
class ChangeCipherSpec12(ChangeCipherSpec, NormalHandshake12):
'''Send TLSv1.2 hello then change cipher spec'''
def __init__(self):
super(ChangeCipherSpec12, self).__init__()
self.record_version = TLSRecord.TLS1_2
class ChangeCipherSpec12PFS(NormalHandshake12PFS, ChangeCipherSpec12):
'''Send PFS TLSv1.2 hello then change cipher spec'''
pass
class HelloRequest(NormalHandshake):
'''Send a hello then hello request'''
def __init__(self):
super(HelloRequest, self).__init__()
self.make_hello_request = make_hello_request
self.record_version = TLSRecord.TLS1_0
def test(self, sock):
logging.debug('Sending Client Hello...')
sock.send(self.make_hello())
logging.debug('Sending Hello Request...')
sock.send(self.make_hello_request(self.record_version))
class HelloRequest12(HelloRequest, NormalHandshake12):
'''Send a TLSv1.2 hello then hello request'''
def __init__(self):
super(HelloRequest12, self).__init__()
self.record_version = TLSRecord.TLS1_2
class HelloRequest12PFS(NormalHandshake12PFS, HelloRequest12):
'''Send a PFS TLSv1.2 hello then hello request'''
pass
class EmptyChangeCipherSpec(NormalHandshake):
'''Send a hello then an empty change cipher spec'''
def __init__(self):
super(EmptyChangeCipherSpec, self).__init__()
self.record_version = TLSRecord.TLS1_0
def test(self, sock):
logging.debug('Sending Client Hello...')
sock.send(self.make_hello())
logging.debug('Sending Empty ChangeCipherSpec...')
record = TLSRecord.create(content_type=TLSRecord.ChangeCipherSpec,
version=self.record_version,
message='')
sock.send(record.bytes)
class EmptyChangeCipherSpec12(EmptyChangeCipherSpec, NormalHandshake12):
'''Send TLSv1.2 hello then an empty change cipher spec'''
def __init__(self):
super(EmptyChangeCipherSpec12, self).__init__()
self.record_version = TLSRecord.TLS1_2
class EmptyChangeCipherSpec12PFS(NormalHandshake12PFS,
EmptyChangeCipherSpec12):
'''Send PFS TLSv1.2 hello then an empty change cipher spec'''
pass
class BadHandshakeMessage(Probe):
'''An invalid handshake message'''
def make_bad_handshake(self):
content = 'Something'
record = TLSRecord.create(content_type=TLSRecord.Handshake,
version=TLSRecord.TLS1_0,
message=content)
#hexdump(record.bytes)
return record.bytes
def test(self, sock):
logging.debug('Sending Client Hello...')
sock.send(make_hello())
logging.debug('Sending bad handshake message...')
sock.send(self.make_bad_handshake())
class OnlyECCipherSuites(Probe):
'''Try connecting with ECC cipher suites only'''
def make_ec_hello(self):
hello = ClientHelloMessage.create(TLSRecord.TLS1_0,
b'01234567890123456789012345678901',
[TLS_ECDH_RSA_WITH_RC4_128_SHA,
TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
        header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EnabledSchema',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_destination_tags(self, destination_id, **kwargs):
"""
Get the tags for this destination
Get the tags for this destination
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_destination_tags(destination_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str destination_id: Identifier for the destination (required)
:return: TagListSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_destination_tags_with_http_info(destination_id, **kwargs)
else:
(data) = self.get_destination_tags_with_http_info(destination_id, **kwargs)
return data
def get_destination_tags_with_http_info(self, destination_id, **kwargs):
"""
Get the tags for this destination
Get the tags for this destination
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_destination_tags_with_http_info(destination_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str destination_id: Identifier for the destination (required)
:return: TagListSchema
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['destination_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_destination_tags" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'destination_id' is set
if ('destination_id' not in params) or (params['destination_id'] is None):
raise ValueError("Missing the required parameter `destination_id` when calling `get_destination_tags`")
collection_formats = {}
resource_path = '/dispatch/destinations/{destinationId}/tags'.replace('{format}', 'json')
path_params = {}
if 'destination_id' in params:
path_params['destinationId'] = params['destination_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TagListSchema',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_destinations(self, **kwargs):
"""
getAllDestinations
Get a list of destinations.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_destinations(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str course_id: Only retrieve resources having `courseId`
:param str more: Value for this parameter will be provided in the 'more' property of registration lists, where needed. An opaque value, construction and parsing may change without notice.
:param datetime since: Only items updated since the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param datetime until: Only items updated before the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param list[str] tags:
:param str datetime_filter: A string describing what the since/until parameters will be applied to. Options are: 'created' or 'updated'. If not provided, it will default to `updated`.
:param str order_by: Optional enum parameter for specifying the field and order by which to sort the results. Defaults to updated_asc
:return: DestinationListSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_destinations_with_http_info(**kwargs)
else:
(data) = self.get_destinations_with_http_info(**kwargs)
return data
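    # Illustrative usage (client setup is an assumption): with a configured
    # DispatchApi instance `api`, a synchronous call is simply
    #     destinations = api.get_destinations(course_id='my-course-id')
    # while passing callback=some_function instead returns the request thread
    # and delivers the DestinationListSchema to the callback.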
def get_destinations_with_http_info(self, **kwargs):
"""
getAllDestinations
Get a list of destinations.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_destinations_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str course_id: Only retrieve resources having `courseId`
:param str more: Value for this parameter will be provided in the 'more' property of registration lists, where needed. An opaque value, construction and parsing may change without notice.
:param datetime since: Only items updated since the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param datetime until: Only items updated before the specified ISO 8601 TimeStamp (inclusive) are included. If a time zone is not specified, UTC time zone will be used.
:param list[str] tags:
:param str datetime_filter: A string describing what the since/until parameters will be applied to. Options are: 'created' or 'updated'. If not provided, it will default to `updated`.
:param str order_by: Optional enum parameter for specifying the field and order by which to sort the results. Defaults to updated_asc
:return: DestinationListSchema
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['course_id', 'more', 'since', 'until', 'tags', 'datetime_filter', 'order_by']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_destinations" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/dispatch/destinations'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'course_id' in params:
query_params['courseId'] = params['course_id']
if 'more' in params:
query_params['more'] = params['more']
if 'since' in params:
query_params['since'] = params['since']
if 'until' in params:
query_params['until'] = params['until']
if 'tags' in params:
query_params['tags'] = params['tags']
collection_formats['tags'] = 'csv'
if 'datetime_filter' in params:
query_params['datetimeFilter'] = params['datetime_filter']
if 'order_by' in params:
query_params['orderBy'] = params['order_by']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DestinationListSchema',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_dispatch(self, dispatch_id, **kwargs):
"""
Get dispatch by id.
Get the dispatch with `dispatchId`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_dispatch(dispatch_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str dispatch_id: Identifier for the dispatch (required)
:return: DispatchSchema
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_dispatch_with_http_info(dispatch_id, **kwargs)
else:
(data) = self.get_dispatch_with_http_info(dispatch_id, **kwargs)
return data
def get_dispatch_with_http_info(self, dispatch_id, **kwargs):
"""
Get dispatch by id.
Get the dispatch with `dispatchId`.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_dispatch_with_http_info(dispatch_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str dispatch_id: Identifier for the dispatch (required)
:return: DispatchSchema
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dispatch_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dispatch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dispatch_id' is set
if ('dispatch_id' not in params) or (params['dispatch_id'] is None):
raise ValueError("Missing the required parameter `dispatch_id` when calling `get_dispatch`")
collection_formats = {}
resource_path = '/dispatch/dispatches/{dispatchId}'.replace('{format}', 'json')
path_params = {}
if 'dispatch_id' in params:
path_params['dispatchId'] = params['dispatch_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] | |
self.locationUri = locationUri
self.parameters = parameters
self.privileges = privileges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.description = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.locationUri = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.parameters = {}
(_ktype70, _vtype71, _size69 ) = iprot.readMapBegin()
for _i73 in xrange(_size69):
_key74 = iprot.readString();
_val75 = iprot.readString();
self.parameters[_key74] = _val75
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.privileges = PrincipalPrivilegeSet()
self.privileges.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Database')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.description != None:
oprot.writeFieldBegin('description', TType.STRING, 2)
oprot.writeString(self.description)
oprot.writeFieldEnd()
if self.locationUri != None:
oprot.writeFieldBegin('locationUri', TType.STRING, 3)
oprot.writeString(self.locationUri)
oprot.writeFieldEnd()
if self.parameters != None:
oprot.writeFieldBegin('parameters', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter76,viter77 in self.parameters.items():
oprot.writeString(kiter76)
oprot.writeString(viter77)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.privileges != None:
oprot.writeFieldBegin('privileges', TType.STRUCT, 5)
self.privileges.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class SerDeInfo:
"""
Attributes:
- name
- serializationLib
- parameters
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.STRING, 'serializationLib', None, None, ), # 2
(3, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, name=None, serializationLib=None, parameters=None,):
self.name = name
self.serializationLib = serializationLib
self.parameters = parameters
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.serializationLib = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.parameters = {}
(_ktype79, _vtype80, _size78 ) = iprot.readMapBegin()
for _i82 in xrange(_size78):
_key83 = iprot.readString();
_val84 = iprot.readString();
self.parameters[_key83] = _val84
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SerDeInfo')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.serializationLib != None:
oprot.writeFieldBegin('serializationLib', TType.STRING, 2)
oprot.writeString(self.serializationLib)
oprot.writeFieldEnd()
if self.parameters != None:
oprot.writeFieldBegin('parameters', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter85,viter86 in self.parameters.items():
oprot.writeString(kiter85)
oprot.writeString(viter86)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Order:
"""
Attributes:
- col
- order
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'col', None, None, ), # 1
(2, TType.I32, 'order', None, None, ), # 2
)
def __init__(self, col=None, order=None,):
self.col = col
self.order = order
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.col = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.order = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Order')
if self.col != None:
oprot.writeFieldBegin('col', TType.STRING, 1)
oprot.writeString(self.col)
oprot.writeFieldEnd()
if self.order != None:
oprot.writeFieldBegin('order', TType.I32, 2)
oprot.writeI32(self.order)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
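# Illustrative round trip (not part of the generated code): serialise an Order
# with the binary protocol into an in-memory buffer and read it back, using the
# thrift runtime already imported at the top of this module.
def _example_order_roundtrip():
    write_buf = TTransport.TMemoryBuffer()
    Order(col='ds', order=1).write(TBinaryProtocol.TBinaryProtocol(write_buf))
    raw = write_buf.getvalue()
    restored = Order()
    restored.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(raw)))
    return restored  # Order(col='ds', order=1)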
class StorageDescriptor:
"""
Attributes:
- cols
- location
- inputFormat
- outputFormat
- compressed
- numBuckets
- serdeInfo
- bucketCols
- sortCols
- parameters
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'cols', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 1
(2, TType.STRING, 'location', None, None, ), # 2
(3, TType.STRING, 'inputFormat', None, None, ), # 3
(4, TType.STRING, 'outputFormat', None, None, ), # 4
(5, TType.BOOL, 'compressed', None, None, ), # 5
(6, TType.I32, 'numBuckets', None, None, ), # 6
(7, TType.STRUCT, 'serdeInfo', (SerDeInfo, SerDeInfo.thrift_spec), None, ), # 7
(8, TType.LIST, 'bucketCols', (TType.STRING,None), None, ), # 8
(9, TType.LIST, 'sortCols', (TType.STRUCT,(Order, Order.thrift_spec)), None, ), # 9
(10, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 10
)
def __init__(self, cols=None, location=None, inputFormat=None, outputFormat=None, compressed=None, numBuckets=None, serdeInfo=None, bucketCols=None, sortCols=None, parameters=None,):
self.cols = cols
self.location = location
self.inputFormat = inputFormat
self.outputFormat = outputFormat
self.compressed = compressed
self.numBuckets = numBuckets
self.serdeInfo = serdeInfo
self.bucketCols = bucketCols
self.sortCols = sortCols
self.parameters = parameters
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.cols = []
(_etype90, _size87) = iprot.readListBegin()
for _i91 in xrange(_size87):
_elem92 = FieldSchema()
_elem92.read(iprot)
self.cols.append(_elem92)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.location = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.inputFormat = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.outputFormat = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.compressed = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.numBuckets = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRUCT:
self.serdeInfo = SerDeInfo()
self.serdeInfo.read(iprot)
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.bucketCols = []
(_etype96, _size93) = iprot.readListBegin()
for _i97 in xrange(_size93):
_elem98 = iprot.readString();
self.bucketCols.append(_elem98)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.LIST:
self.sortCols = []
(_etype102, _size99) = iprot.readListBegin()
for _i103 in xrange(_size99):
_elem104 = Order()
_elem104.read(iprot)
self.sortCols.append(_elem104)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.MAP:
self.parameters = {}
(_ktype106, _vtype107, _size105 ) = iprot.readMapBegin()
for _i109 in xrange(_size105):
_key110 = iprot.readString();
_val111 = iprot.readString();
self.parameters[_key110] = _val111
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('StorageDescriptor')
if self.cols != None:
oprot.writeFieldBegin('cols', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.cols))
for iter112 in self.cols:
iter112.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.location != None:
oprot.writeFieldBegin('location', TType.STRING, 2)
oprot.writeString(self.location)
oprot.writeFieldEnd()
if self.inputFormat != None:
oprot.writeFieldBegin('inputFormat', TType.STRING, 3)
oprot.writeString(self.inputFormat)
oprot.writeFieldEnd()
if self.outputFormat != None:
oprot.writeFieldBegin('outputFormat', TType.STRING, 4)
oprot.writeString(self.outputFormat)
oprot.writeFieldEnd()
if self.compressed != None:
oprot.writeFieldBegin('compressed', TType.BOOL, 5)
oprot.writeBool(self.compressed)
oprot.writeFieldEnd()
if self.numBuckets != None:
oprot.writeFieldBegin('numBuckets', TType.I32, 6)
oprot.writeI32(self.numBuckets)
oprot.writeFieldEnd()
if self.serdeInfo != None:
oprot.writeFieldBegin('serdeInfo', TType.STRUCT, 7)
self.serdeInfo.write(oprot)
oprot.writeFieldEnd()
if self.bucketCols != None:
oprot.writeFieldBegin('bucketCols', TType.LIST, 8)
oprot.writeListBegin(TType.STRING, len(self.bucketCols))
for iter113 in self.bucketCols:
oprot.writeString(iter113)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.sortCols != None:
oprot.writeFieldBegin('sortCols', TType.LIST, 9)
oprot.writeListBegin(TType.STRUCT, len(self.sortCols))
for iter114 in self.sortCols:
iter114.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.parameters != None:
oprot.writeFieldBegin('parameters', TType.MAP, 10)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))
for kiter115,viter116 in self.parameters.items():
oprot.writeString(kiter115)
oprot.writeString(viter116)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Table:
which PLS2 was run.
"""
self.settingsDict = {}
self.settingsDict['numComp'] = self.numPC
self.settingsDict['arrX'] = self.arrX_input
self.settingsDict['arrY'] = self.arrY_input
self.settingsDict['Xstand'] = self.Xstand
self.settingsDict['Ystand'] = self.Ystand
self.settingsDict['analysed X'] = self.arrX
self.settingsDict['analysed Y'] = self.arrY
self.settingsDict['cv type'] = self.cvType
return self.settingsDict
def X_means(self):
"""
Returns a vector holding the column means of X.
"""
return np.average(self.arrX_input, axis=0).reshape(1,-1)
def X_scores(self):
"""
Returns array holding scores of array X. First column holds scores
for component 1, second column holds scores for component 2, etc.
"""
return self.arrT
def X_loadings(self):
"""
Returns array holding loadings of array X. Rows represent variables
and columns represent components. First column holds loadings for
component 1, second column holds loadings for component 2, etc.
"""
return self.arrP
def X_loadingWeights(self):
"""
Returns an array holding loadings weights of array X.
"""
return self.arrW
def X_corrLoadings(self):
"""
Returns array holding correlation loadings of array X. First column
holds correlation loadings for component 1, second column holds
correlation loadings for component 2, etc.
"""
# Creates empty matrix for correlation loadings
arr_XcorrLoadings = np.zeros((np.shape(self.arrT)[1], np.shape(self.arrP)[0]), float)
# Compute correlation loadings:
# For each PC in score matrix
for PC in range(np.shape(self.arrT)[1]):
PCscores = self.arrT[:, PC]
# For each variable/attribute in original matrix (not meancentered)
for var in range(np.shape(self.arrX)[1]):
origVar = self.arrX[:, var]
corrs = np.corrcoef(PCscores, origVar)
arr_XcorrLoadings[PC, var] = corrs[0,1]
self.arr_XcorrLoadings = np.transpose(arr_XcorrLoadings)
return self.arr_XcorrLoadings
def X_residuals(self):
"""
Returns a dictionary holding the residual arrays for array X after
each computed component. Dictionary key represents order of component.
"""
# Create empty dictionary that will hold residuals
X_residualsDict = {}
# Fill dictionary with residuals arrays from residuals list
for ind, item in enumerate(self.X_residualsList):
X_residualsDict[ind] = item
return X_residualsDict
def X_calExplVar(self):
"""
Returns a list holding the calibrated explained variance for
each component. First number in list is for component 1, second number
for component 2, etc.
"""
return self.XcalExplVarList
def X_cumCalExplVar_indVar(self):
"""
Returns an array holding the cumulative calibrated explained variance
for each variable in X after each component. First row represents zero
components, second row represents one component, third row represents
two components, etc. Columns represent variables.
"""
return self.cumCalExplVarXarr_indVar
def X_cumCalExplVar(self):
"""
Returns a list holding the cumulative calibrated explained variance
for array X after each component.
"""
return self.XcumCalExplVarList
def X_predCal(self):
"""
Returns a dictionary holding the predicted arrays Xhat from
calibration after each computed component. Dictionary key represents
order of component.
"""
return self.calXpredDict
def X_PRESSE_indVar(self):
"""
Returns array holding PRESSE for each individual variable in X
acquired through calibration after each computed component. First row
is PRESSE for zero components, second row for component 1, third row
for component 2, etc.
"""
return self.PRESSEarr_indVar_X
def X_PRESSE(self):
"""
Returns array holding PRESSE across all variables in X acquired
through calibration after each computed component. First row is PRESSE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSE_total_list_X
def X_MSEE_indVar(self):
"""
Returns an array holding MSEE for each variable in array X acquired
through calibration after each computed component. First row holds MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEEarr_indVar_X
def X_MSEE(self):
"""
Returns an array holding MSEE across all variables in X acquired
through calibration after each computed component. First row is MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEE_total_list_X
def X_RMSEE_indVar(self):
"""
Returns an array holding RMSEE for each variable in array X acquired
through calibration after each component. First row holds RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEEarr_indVar_X
def X_RMSEE(self):
"""
Returns an array holding RMSEE across all variables in X acquired
through calibration after each computed component. First row is RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEE_total_list_X
def X_valExplVar(self):
"""
Returns a list holding the validated explained variance for X after
each component. First number in list is for component 1, second number
for component 2, third number for component 3, etc.
"""
return self.XvalExplVarList
def X_cumValExplVar_indVar(self):
"""
Returns an array holding the cumulative validated explained variance
for each variable in X after each component. First row represents
zero components, second row represents component 1, third row for
component 2, etc. Columns represent variables.
"""
return self.cumValExplVarXarr_indVar
def X_cumValExplVar(self):
"""
Returns a list holding the cumulative validated explained variance
for array X after each component. First number represents zero
components, second number represents component 1, etc.
"""
return self.XcumValExplVarList
def X_predVal(self):
"""
Returns dictionary holding arrays of predicted Xhat after each
component from validation. Dictionary key represents order of
component.
"""
return self.valXpredDict
def X_PRESSCV_indVar(self):
"""
Returns array holding PRESSCV for each individual variable in X
acquired through cross validation after each computed component. First
row is PRESSCV for zero components, second row for component 1, third
row for component 2, etc.
"""
return self.PRESSCVarr_indVar_X
def X_PRESSCV(self):
"""
Returns an array holding PRESSCV across all variables in X acquired
through cross validation after each computed component. First row is
PRESSCV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSCV_total_list_X
def X_MSECV_indVar(self):
"""
Returns an array holding MSECV for each variable in X acquired through
cross validation. First row is MSECV for zero components, second row
for component 1, etc.
"""
return self.MSECVarr_indVar_X
def X_MSECV(self):
"""
Returns an array holding MSECV across all variables in X acquired
through cross validation after each computed component. First row is
MSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSECV_total_list_X
def X_RMSECV_indVar(self):
"""
Returns an array holding RMSECV for each variable in X acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSECVarr_indVar_X
def X_RMSECV(self):
"""
Returns an array holding RMSECV across all variables in X acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSECV_total_list_X
def X_scores_predict(self, Xnew, numComp=None):
"""
Returns array of X scores from new X data using the existing model.
Rows represent objects and columns represent components.
"""
if numComp is None:
    numComp = self.numPC
if not 0 <= numComp <= self.numPC:
    raise ValueError('numComp must be between 0 and {}'.format(self.numPC))
# First pre-process new X data accordingly
if self.Xstand:
x_new = (Xnew - np.average(self.arrX_input, axis=0)) / np.std(self.arrX_input, axis=0, ddof=1)  # per-column std, matching the training standardisation
else:
x_new = (Xnew - np.average(self.arrX_input, axis=0))
# W*inv(P'W)
return np.dot(x_new, np.dot(self.arrW[:,0:numComp],
np.linalg.inv(np.dot(np.transpose(self.arrP[:,0:numComp]),
self.arrW[:,0:numComp]))))
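    # Illustrative usage (a fitted model `plsr2` and a new 2-D array `Xnew` with
    # the same variables as the training X are assumptions):
    #     new_scores = plsr2.X_scores_predict(Xnew, numComp=2)
    # gives an (n_new x 2) array of projected X scores for the first two components.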
def scoresRegressionCoeffs(self):
"""
Returns a one dimensional array holding regression coefficients between
scores of array X and Y.
"""
return self.arrC
def Y_means(self):
"""
Returns a vector holding the column means of array Y.
"""
return np.average(self.arrY_input, axis=0).reshape(1,-1)
def Y_scores(self):
"""
Returns an array holding the scores U of array Y. Rows represent
objects and columns represent components. First column holds scores
for component 1, second column for component 2, etc.
"""
return self.arrU
def Y_loadings(self):
"""
Returns an array holding loadings C of array Y. Rows represent
variables and columns represent components. First column for
component 1, second column for component 2, etc.
"""
return self.arrQ_alt
def Y_corrLoadings(self):
"""
Returns array holding correlation loadings of array Y. First column
holds correlation loadings for component 1, second column holds
correlation loadings for component 2, etc.
"""
# Creates empty matrix for correlation loadings
arr_YcorrLoadings = np.zeros((np.shape(self.arrT)[1], np.shape(self.arrQ)[0]), float)
# Compute correlation loadings:
# For each PC in score matrix
for PC in range(np.shape(self.arrT)[1]):
PCscores = self.arrT[:, PC]
# For each variable/attribute in original matrix (not meancentered)
for var | |
from flask import jsonify, request
from marshmallow import ValidationError
from app import schemas, models
from flask_restx import Resource, reqparse
import flask_praetorian
from app import task_ns as ns
from app.api.task.task_utilities.task_socket_actions import *
from app.api.task.task_utilities.task_socket_functions import emit_socket_broadcast, emit_socket_assignment_broadcast
from app.api.task.task_utilities.taskfunctions import set_previous_relay_uuids, get_filtered_query_by_status, \
get_filtered_query_by_status_non_relays, roles_check_and_assign_user, get_items_before_parent
from app.api.functions.utilities import add_item_to_delete_queue, remove_item_from_delete_queue, get_page, \
get_query
from app.api.functions.viewfunctions import load_request_into_object
from app.api.functions.errors import internal_error, not_found, forbidden_error, schema_validation_error, \
unprocessable_entity_error, bad_request_error
from app.exceptions import ObjectNotFoundError, SchemaValidationError, AlreadyFlaggedForDeletionError, \
ProtectedFieldError
from app.api.task.task_utilities.decorators import check_rider_match
from app.api.functions.utilities import get_object
from flask_praetorian import utilities
from app import db
task_schema = schemas.TaskSchema(exclude=("comments",))
tasks_schema = schemas.TaskSchema(many=True, exclude=("comments",))
tasks_parent_schema = schemas.TasksParentSchema(many=True)
assigned_users_schema = schemas.UserSchema(many=True)
TASK = models.Objects.TASK
TASK_PARENT = models.Objects.TASK_PARENT
DELETE_FLAG = models.Objects.DELETE_FLAG
@ns.route('/<task_id>/restore', endpoint="task_undelete")
class TaskRestore(Resource):
@flask_praetorian.roles_accepted("admin", "coordinator")
def put(self, task_id):
try:
task = get_object(TASK, task_id, with_deleted=True)
except ObjectNotFoundError:
return not_found(TASK, task_id)
if task.deleted:
delete_queue_task = get_object(DELETE_FLAG, task.uuid)
for deliverable in task.deliverables:
check = get_object(DELETE_FLAG, deliverable.uuid)
if check.time_created >= delete_queue_task.time_created and check.active:
remove_item_from_delete_queue(deliverable)
remove_item_from_delete_queue(task)
else:
return {'uuid': str(task.uuid), 'message': 'Task {} not flagged for deletion.'.format(task.uuid)}, 200
db.session.flush()
task_parent = get_object(models.Objects.TASK_PARENT, task.parent_id)
set_previous_relay_uuids(task_parent)
db.session.commit()
emit_socket_broadcast(task_schema.dump(task), RESTORE_TASK, uuid=task.uuid)
return {'uuid': str(task.uuid), 'message': 'Task {} deletion flag removed.'.format(task.uuid)}, 200
@ns.route('/<task_id>', endpoint="task_detail")
class Task(Resource):
@flask_praetorian.auth_required
def get(self, task_id):
try:
return jsonify(task_schema.dump(get_object(TASK, task_id)))
except ObjectNotFoundError:
return not_found(TASK, task_id)
@flask_praetorian.roles_accepted('admin', 'coordinator')
def delete(self, task_id):
try:
task = get_object(TASK, task_id)
except ObjectNotFoundError:
return not_found(TASK, task_id)
try:
add_item_to_delete_queue(task)
for deliverable in task.deliverables:
if not deliverable.deleted:
add_item_to_delete_queue(deliverable)
except AlreadyFlaggedForDeletionError:
emit_socket_broadcast({}, DELETE_TASK, uuid=task_id)
return {'uuid': str(task.uuid), 'message': "Task queued for deletion"}, 202
task_parent = get_object(models.Objects.TASK_PARENT, task.parent_id)
set_previous_relay_uuids(task_parent)
db.session.commit()
emit_socket_broadcast({}, DELETE_TASK, uuid=task_id)
return {'uuid': str(task.uuid), 'message': "Task queued for deletion"}, 202
@flask_praetorian.auth_required
@check_rider_match
# @check_parent_or_collaborator_or_admin_match
def patch(self, task_id):
try:
task = get_object(TASK, task_id)
if task.deleted:
return not_found(TASK, task_id)
except ObjectNotFoundError:
return not_found(TASK, task_id)
try:
load_request_into_object(TASK, instance=task)
except ValidationError as e:
return schema_validation_error(e, object_id=task_id)
except ProtectedFieldError as e:
return forbidden_error(e, object_id=task_id)
task_parent = get_object(models.Objects.TASK_PARENT, task.parent_id)
set_previous_relay_uuids(task_parent)
db.session.commit()
socket_payload = request.get_json()
db.session.flush()
task_dump = task_schema.dump(task)
try:
etag = task_dump['etag']
except KeyError:
etag = ""
socket_payload['etag'] = etag
emit_socket_broadcast(socket_payload, UPDATE_TASK, uuid=task_id)
return {"etag": etag, "uuid": str(task.uuid), 'message': 'Task {} updated.'.format(task.uuid)}
@ns.route(
'/<task_id>/assigned_users',
endpoint="tasks_assign_user")
class TasksAssignees(Resource):
@flask_praetorian.auth_required
def get(self, task_id):
parser = reqparse.RequestParser()
parser.add_argument("role", type=str, location="args")
args = parser.parse_args()
try:
task = get_object(TASK, task_id)
except ObjectNotFoundError:
return not_found(TASK, task_id)
if args['role'] == "rider":
return assigned_users_schema.dump(task.assigned_riders)
elif args['role'] == "coordinator":
return assigned_users_schema.dump(task.assigned_coordinators)
else:
combined = []
combined.extend(task.assigned_coordinators)
combined.extend(task.assigned_riders)
return assigned_users_schema.dump(combined)
@flask_praetorian.roles_accepted('admin', 'coordinator', 'rider')
# @check_parent_or_collaborator_or_admin_match
def put(self, task_id):
try:
task = get_object(TASK, task_id)
if task.deleted:
return not_found(TASK, task_id)
except ObjectNotFoundError:
return not_found(TASK, task_id)
parser = reqparse.RequestParser()
parser.add_argument("role", type=str, location="args")
parser.add_argument('user_uuid')
args = parser.parse_args()
user_uuid = args['user_uuid']
try:
user = get_object(models.Objects.USER, user_uuid)
if user.deleted:
return not_found(models.Objects.USER, user_uuid)
except ObjectNotFoundError:
return not_found(models.Objects.USER, user_uuid)
try:
socket_update_type = roles_check_and_assign_user(task, user, args['role'])
except SchemaValidationError as e:
return bad_request_error(str(e), task_id)
db.session.commit()
request_json = request.get_json()
emit_socket_broadcast(request_json, socket_update_type, uuid=str(task.uuid))
emit_socket_assignment_broadcast(task_schema.dump(task), socket_update_type, str(user.uuid))
return {'uuid': str(task.uuid), 'message': 'Task {} updated.'.format(task.uuid)}, 200
@flask_praetorian.roles_accepted('admin', 'coordinator', 'rider')
# @check_parent_or_collaborator_or_admin_match
def delete(self, task_id):
try:
task = get_object(TASK, task_id)
if task.deleted:
return not_found(TASK, task_id)
except ObjectNotFoundError:
return not_found(TASK, task_id)
parser = reqparse.RequestParser()
parser.add_argument('user_uuid')
parser.add_argument("role", type=str, location="args")
args = parser.parse_args()
user_uuid = args['user_uuid']
try:
user = get_object(models.Objects.USER, user_uuid)
if user.deleted:
return not_found(models.Objects.USER, user_uuid)
except ObjectNotFoundError:
return not_found(models.Objects.USER, user_uuid)
if args['role'] == "rider":
filtered_riders = list(filter(lambda u: u.uuid != user.uuid, task.assigned_riders))
task.assigned_riders = filtered_riders
socket_update_type = REMOVE_ASSIGNED_RIDER_FROM_TASK
elif args['role'] == "coordinator":
filtered_coordinators = list(filter(lambda u: u.uuid != user.uuid, task.assigned_coordinators))
task.assigned_coordinators = filtered_coordinators
socket_update_type = REMOVE_ASSIGNED_COORDINATOR_FROM_TASK
else:
return forbidden_error("Type of role must be specified.", task_id)
db.session.add(task)
db.session.commit()
request_json = request.get_json()
emit_socket_broadcast(request_json, socket_update_type, uuid=task_id)
emit_socket_assignment_broadcast(task_schema.dump(task), socket_update_type, user_uuid)
return {'uuid': str(task.uuid), 'message': 'Task {} updated.'.format(task.uuid)}, 200
@ns.route('s',
endpoint="tasks_list_all")
class Tasks(Resource):
@flask_praetorian.roles_accepted('coordinator', 'admin')
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument("page", type=int, location="args")
parser.add_argument("role", type=str, location="args")
parser.add_argument("order", type=str, location="args")
parser.add_argument("status", type=str, location="args")
parser.add_argument("before_parent", type=int, location="args")
args = parser.parse_args()
page = args['page'] if args['page'] is not None else 1
status = args['status']
before_parent = args['before_parent'] if args['before_parent'] else 0
order = args['order'] if args['order'] else "descending"
query = models.Task.query
# filter deleted tasks
query_deleted = query.filter(
models.Task.deleted.is_(False)
)
filtered = get_filtered_query_by_status(query_deleted, status)
filtered_ordered = filtered.order_by(models.Task.parent_id.desc(), models.Task.order_in_relay)
# TODO: figure out how to enclose all task relays when paginate cuts some of them off
items = get_items_before_parent(before_parent, page, order, filtered_ordered)
except ObjectNotFoundError:
return not_found(TASK)
except Exception as e:
    # re-raise unexpected errors; use `return internal_error(e)` here instead
    # to turn them into 500 API responses
    raise
if len(items) == 0:
pass
# return not_found(TASK)
return tasks_schema.dump(items)
@flask_praetorian.auth_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("auto_assign_role", type=str, location="args")
parser.add_argument("user_uuid", type=str, location="args")
args = parser.parse_args()
try:
task = load_request_into_object(TASK)
except SchemaValidationError as e:
return schema_validation_error(str(e))
if task.parent_id:
try:
parent = get_object(TASK_PARENT, task.parent_id)
except ObjectNotFoundError:
return not_found(TASK_PARENT, task.parent_id)
next_order_in_relay_int = parent.relays_with_deleted_cancelled_rejected.count() + 1
# TODO: could this go into marshmallow schema validation?
if parent.relays.count() > 19:
return forbidden_error("Cannot add more than 19 relays to a job", task.parent_id)
else:
new_parent = models.TasksParent()
db.session.add(new_parent)
db.session.flush()
# TODO: When organisations tables are implemented, make first four characters be from there
new_parent.reference = "FEVS-{}".format(new_parent.id)
next_order_in_relay_int = 1
task.parent_id = new_parent.id
task.order_in_relay = next_order_in_relay_int
author = utilities.current_user()
task.author_uuid = author.uuid
db.session.add(task)
db.session.flush()
if args['auto_assign_role']:
# pass off the task, user and role off to the user assign function to make sure all is well with the request
user_uuid = args['user_uuid']
try:
assign_user = get_object(models.Objects.USER, user_uuid)
if assign_user.deleted:
return not_found(models.Objects.USER, user_uuid)
except ObjectNotFoundError:
return not_found(models.Objects.USER, user_uuid)
try:
socket_update_type = roles_check_and_assign_user(task, assign_user, args['auto_assign_role'])
request_json = request.get_json()
emit_socket_broadcast(request_json, socket_update_type, uuid=str(task.uuid))
emit_socket_assignment_broadcast(task_schema.dump(task), socket_update_type, str(assign_user.uuid))
except SchemaValidationError as e:
return bad_request_error(str(e), task.uuid)
task_parent = get_object(models.Objects.TASK_PARENT, task.parent_id)
set_previous_relay_uuids(task_parent)
db.session.commit()
return {
'uuid': str(task.uuid),
'time_created': str(task.time_created),
'reference': str(task.reference),
'message': 'Task {} created'.format(task.uuid),
'author_uuid': str(task.author_uuid),
'parent_id': str(task.parent_id),
'order_in_relay': str(task.order_in_relay)
}, 201
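# Illustrative client call (not part of the API itself): create a task and
# auto-assign a rider in one request. The base URL, the mount point of task_ns
# ('/api/v0.1' here), the empty request body and the bearer token are all
# assumptions; the accepted body fields are whatever TaskSchema allows.
def _example_create_task(base_url, token, rider_uuid):
    import requests
    resp = requests.post(
        '{}/api/v0.1/tasks'.format(base_url),
        params={'auto_assign_role': 'rider', 'user_uuid': rider_uuid},
        json={},
        headers={'Authorization': 'Bearer {}'.format(token)},
    )
    resp.raise_for_status()
    return resp.json()['uuid']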
@ns.route('s/<user_uuid>',
endpoint="tasks_list")
class UsersTasks(Resource):
@flask_praetorian.auth_required
def get(self, user_uuid):
if not user_uuid:
return not_found(models.Objects.USER)
try:
requested_user = get_object(models.Objects.USER, user_uuid)
if not requested_user:
return not_found(models.Objects.USER, user_uuid)
if requested_user.deleted:
return not_found(requested_user.object_type, user_uuid)
except ObjectNotFoundError:
return not_found(models.Objects.USER, user_uuid)
try:
# TODO: add page size querystring
parser = reqparse.RequestParser()
parser.add_argument("page", type=int, location="args")
parser.add_argument("role", type=str, location="args")
parser.add_argument("order", type=str, location="args")
parser.add_argument("status", type=str, location="args")
parser.add_argument("before_parent", type=int, location="args")
args = parser.parse_args()
page = args['page'] if args['page'] is not None else 1
role = args['role']
status = args['status']
before_parent = args['before_parent'] if args['before_parent'] else 0
order = args['order'] if args['order'] else "descending"
if role == "rider":
query = requested_user.tasks_as_rider
elif role == "coordinator":
query = requested_user.tasks_as_coordinator
elif role == "author":
query = requested_user.tasks_as_author
else:
query = requested_user.tasks_as_coordinator
# filter deleted tasks
query_deleted = query.filter(
models.Task.deleted.is_(False)
)
if role == "coordinator":
filtered = get_filtered_query_by_status(query_deleted, status)
else:
filtered = get_filtered_query_by_status_non_relays(query_deleted, status)
filtered_ordered = filtered.order_by(models.Task.parent_id.desc(), models.Task.order_in_relay)
# TODO: figure out how to enclose all task relays when paginate cuts some of them off
items = get_items_before_parent(before_parent, page, order, filtered_ordered)
except ObjectNotFoundError:
return not_found(TASK)
except Exception as e:
    # re-raise unexpected errors; use `return internal_error(e)` here instead
    # to turn them into 500 API responses
    raise
if len(items) == 0:
pass
# return not_found(TASK)
return tasks_schema.dump(items)
@ns.route('/<task_uuid>/destinations',
endpoint="task_destinations")
class TaskDestinations(Resource):
@flask_praetorian.auth_required
def put(self, task_uuid):
parser = reqparse.RequestParser()
parser.add_argument("pickup_location_uuid", type=str)
parser.add_argument("dropoff_location_uuid", type=str)
args = parser.parse_args()
try:
task = get_object(TASK, task_uuid)
except ObjectNotFoundError:
return not_found(TASK, task_uuid)
pickup_socket_update = None
dropoff_socket_update = None
if args['pickup_location_uuid']:
location_uuid = args['pickup_location_uuid']
try:
location = get_object(models.Objects.LOCATION, location_uuid)
except ObjectNotFoundError:
return not_found(models.Objects.LOCATION, location_uuid)
task.pickup_location_uuid = location.uuid
pickup_socket_update = UPDATE_TASK_PICKUP_LOCATION
elif args['dropoff_location_uuid']:
location_uuid = args['dropoff_location_uuid']
try:
location = get_object(models.Objects.LOCATION, location_uuid)
except ObjectNotFoundError:
return not_found(models.Objects.LOCATION, location_uuid)
task.dropoff_location_uuid = location.uuid
dropoff_socket_update = UPDATE_TASK_DROPOFF_LOCATION
db.session.commit()
db.session.flush()
task_dump = task_schema.dump(task)
try:
etag = task_dump['etag']
except KeyError:
etag = ""
if pickup_socket_update:
socket_response = {"pickup_location": task_dump['pickup_location']}
emit_socket_broadcast(socket_response, pickup_socket_update, uuid=task_uuid)
elif dropoff_socket_update == UPDATE_TASK_DROPOFF_LOCATION:
socket_response = {"dropoff_location": task_dump['dropoff_location']}
emit_socket_broadcast(socket_response, dropoff_socket_update, uuid=task_uuid)
return {'etag': etag, 'uuid': str(task.uuid), 'message': 'Task {} updated.'.format(task.uuid)}, 200
@flask_praetorian.auth_required
def delete(self, task_uuid):
parser = reqparse.RequestParser()
parser.add_argument("destination", type=str, location="args")
args = parser.parse_args()
if not args['destination']:
return forbidden_error("Must specify either pickup or delivery in destination query parameter")
try:
task = get_object(TASK, task_uuid)
except ObjectNotFoundError:
return not_found(TASK, task_uuid)
if args['destination'] == "pickup":
socket_update_type = UPDATE_TASK_PICKUP_LOCATION
task.pickup_location_uuid = None
elif args['destination'] == "delivery":
socket_update_type = UPDATE_TASK_DROPOFF_LOCATION
task.dropoff_location_uuid = None
else:
return unprocessable_entity_error(
"Must specify pickup or delivery in destination parameter.",
object_id=task_uuid)
db.session.commit()
db.session.flush()
task_dump = task_schema.dump(task)
try:
etag = task_dump['etag']
except KeyError:
etag = ""
socket_response = {}
if socket_update_type == UPDATE_TASK_PICKUP_LOCATION:
socket_response = {"pickup_location": None}
elif socket_update_type == UPDATE_TASK_DROPOFF_LOCATION:
socket_response = {"dropoff_location": None}
emit_socket_broadcast(socket_response, socket_update_type, uuid=task_uuid)
return {'etag': etag, | |
import matplotlib.pyplot as plt
import seaborn
import numpy as np
import warnings
import itertools
import os
import os.path as osp
import torch
import pandas as pd
# The 'analyse' module provides methods for analysing embedding relation
# learn_results
def expand_test_result_df(df_test):
"""Adds columns to a DataFrame with test results
Args:
df_test DataFrame as produced by trainer.ModelTrainer, i.e. with columns
'tp', 'fp', 'fn', 'correct', 'total_examples' and 'examples_above_threshold'
Returns:
the input DataFrame with additional columns for 'precision', 'recall',
'acc'uracy, 'f1' measure and 'coverage' percentage.
"""
#print('type of df_test', str(type(df_test)))
#print('keys in df_test', df_test.keys())
df = df_test
epsilon = 0.00001 # avoid division by zero
df['precision'] = df['tp'] / (df['tp'] + df['fp'] + epsilon)
df['recall'] = df['tp'] / (df['tp'] + df['fn'] + epsilon)
df['acc'] = df['correct'] / (df['examples_above_threshold'] + epsilon)
df['f1'] = 2*df['tp'] / (2*df['tp'] + df['fp'] + df['fn'] + epsilon)
df['coverage'] = df['examples_above_threshold'] / (df['total_examples'] + epsilon)
return df
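# Minimal illustration of expand_test_result_df on a hand-built single-row
# frame with the expected raw count columns (the numbers are made up):
def _example_expand_test_result_df():
    raw = pd.DataFrame([{
        'threshold': 0.0, 'tp': 40, 'fp': 10, 'fn': 10, 'correct': 80,
        'total_examples': 100, 'examples_above_threshold': 100,
    }])
    df = expand_test_result_df(raw)
    # precision = recall = acc = f1 = 0.8 and coverage = 1.0 (up to epsilon)
    return df[['precision', 'recall', 'acc', 'f1', 'coverage']]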
def aggregate_runs(learn_results):
"""Aggregate the run results for the input learn_results
Args:
learn_results a list of learn results as returned by learn or learn.load_results
Returns:
a list of test and randpredict aggregates over the runs for each
rel_name, rel_type and emb combination
"""
rel_name = learn_results['rel_name']
rel_type = learn_results['rel_type']
emb_model_results = learn_results['emb_model_results']
result = []
for emb in emb_model_results:
emb_results = emb_model_results[emb]
emress = EmbeddingModelResults(emb_results)
basic_agg = {
'rel_type': rel_type,
'rel_name': rel_name,
'emb': emb,
}
for test_agg in emress.calc_test_aggregates():
ba = {**basic_agg, **test_agg}
ba['result_type'] = 'test'
result.append(ba)
for rand_agg in emress.calc_randpredict_aggregates():
ra = {**basic_agg, **rand_agg}
ra['result_type'] = 'random'
result.append(ra)
return result
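# The aggregates are plain dicts, so they tabulate directly; a sketch assuming
# `learn_results` came from the learn module's result loader:
#     agg_df = pd.DataFrame(aggregate_runs(learn_results))
#     agg_df[agg_df.result_type == 'test'][['emb', 'model', 'acc_avg', 'f1_avg']]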
class EmbeddingModelResults():
def __init__(self, modress):
"""Provides methods for aggregating embedding model results
Args:
modress a list of embedding-model results
"""
self.modress = modress
for modres in self.modress:
expand_test_result_df(modres['test_df'])
expand_test_result_df(modres['test_random_result'])
expand_test_result_df(modres['pretrain_test_result'])
def calc_test_aggregates(self):
return self._calc_aggregates(self._test_val)
def calc_randpredict_aggregates(self):
return self._calc_aggregates(self._rand_val)
def calc_pretrain_aggregates(self):
return self._calc_aggregates(self._pretrain_val)
def models(self):
return set(self.extract_vals(self._res_val('model')))
def _calc_aggregates(self, metric_modres_to_val_fn):
result = []
for model in self.models():
agg = {}
n = None
for metric in ['acc', 'f1', 'precision', 'recall']:
metrics = self.extract_vals(
metric_modres_to_val_fn(metric),
modres_filter=lambda x: x['model'] == model
)
if not n:
n = len(metrics)
else:
assert(n == len(metrics))
agg['%s_avg' % metric] = np.mean(metrics)
agg['%s_std' % metric] = np.std(metrics)
agg['%s_min' % metric] = np.min(metrics)
agg['%s_max' % metric] = np.max(metrics)
agg['model'] = model
agg['datapoints'] = n
result.append(agg)
return result
def _testres(self, modres):
# plotter.expand needed to calculate 'acc', 'f1', etc. from
# raw tp, fp, fn, correct counts
# better to perform this as a separate step somewhere else...
df = modres['test_df']
no_threshold = df[df['threshold'] == 0.0] # df.loc[0]
# print('no_threshold_testres:', no_threshold)
return no_threshold
def _randres(self, modres):
return modres['test_random_result']
def _pretrain_res(self, modres):
return modres['pretrain_test_result']
def _test_val(self, key):
return lambda modres: self._testres(modres)[key]
def _rand_val(self, key):
return lambda modres: self._randres(modres)[key]
def _pretrain_val(self, key):
return lambda modres: self._pretrain_res(modres)[key]
def _res_val(self, key):
return lambda modres: modres[key]
def extract_vals(self, value_extractor, modres_filter=lambda x: True):
"""returns a list of values for a given list of model results"""
result = []
for model_result in self.modress:
if modres_filter(model_result):
result.append(value_extractor(model_result))
return result
def extract_val(self, value_extractor, modres_filter=lambda x: True):
    vals = set(self.extract_vals(value_extractor, modres_filter=modres_filter))
    if len(vals) == 1:
        return vals.pop()
    elif len(vals) == 0:
        raise Exception('No value using {}'.format(value_extractor))
    else:
        print('Multiple values for {}'.format(value_extractor))
        return vals.pop()
def summarise_rel_models(learn_rel_results, plotter):
"""DEPRECATED Summarises the data gathered while learning a relation
DEPRECATED, use aggregate_runs instead
Since the learn_rel_results contains multiple runs/model results for the
relation, this method provides a method for aggregating the results of
the different runs. Allowing us to calculate the mean and stdev metrics.
Also, since various models may have been tried in learn_rel, this
method performs a selection to choose to best performing model.
Args:
learn_rel_results object as output by method learn_rel
plotter object of type embrelassess.analyse.Plotter
Returns:
dictionary summarising the best model and the base model
"""
warnings.warn("summarise_rel_models is deprecated, use aggregate_runs instead")
rel_name = learn_rel_results['rel_name']
rel_type = learn_rel_results['rel_type']
pos_exs = learn_rel_results['pos_exs']
empty_result = {
"rel_name": rel_name, "rel_type": rel_type, "epochs": 0,
"best_acc": 0, "best_f1": 0, "best_prec": 0, "best_rec": 0,
"base_acc": 0.5, "base_f1": 0.5, "base_prec": 0.5, "base_rec": 0.5,
"best_model": "None", "best_model_type": "None",
"pos_exs": pos_exs}
def get_testres(model_result):
# plotter.expand needed to calculate 'acc', 'f1', etc. from
# raw tp, fp, fn, correct counts
# better to perform this as a separate step somewhere else...
return expand_test_result_df(model_result['test_df'].loc[0])
def get_randres(model_result):
return model_result['test_random_result']
def get_test_accuracy(model_result):
return get_testres(model_result)['acc']
def get_test_f1(model_result):
return get_testres(model_result)['f1']
def get_test_precision(model_result):
return get_testres(model_result)['precision']
def get_test_recall(model_result):
return get_testres(model_result)['recall']
def get_base_accuracy(model_result):
return get_randres(model_result)['acc']
def get_base_f1(model_results):
return get_randres(model_results)['f1']
def get_base_prec(model_results):
return get_randres(model_results)['precision']
def get_base_rec(model_results):
return get_randres(model_results)['recall']
def get_model(model_result):
return model_result['model']
def extract_vals(model_results, value_extractor):
"""returns a list of values for a given list of model results"""
result = []
for model_result in model_results:
result.append(value_extractor(model_result))
return result
def extract_val(model_results, value_extractor):
result = None
for model_result in model_results:
result = value_extractor(model_result)
return result
winner_model = None # winner agg_model_results
emb_agg_results = {}
emb_model_results = learn_rel_results['emb_model_results']
for emb_name in emb_model_results:
model_results = emb_model_results[emb_name]
model = extract_val(model_results, get_model)
test_accs = extract_vals(model_results, get_test_accuracy)
test_f1s = extract_vals(model_results, get_test_f1)
test_precs = extract_vals(model_results, get_test_precision)
test_recs = extract_vals(model_results, get_test_recall)
ba_accs = extract_vals(model_results, get_base_accuracy)
ba_f1s = extract_vals(model_results, get_base_f1)
ba_precs = extract_vals(model_results, get_base_prec)
ba_recs = extract_vals(model_results, get_base_rec)
agg_model_results = {
"model": model,
"avg_acc": np.mean(test_accs), "std_acc": np.std(test_accs),
"avg_f1": np.mean(test_f1s), "std_f1": np.std(test_f1s),
"avg_prec": np.mean(test_precs), "std_prec": np.std(test_precs),
"avg_rec": np.mean(test_recs), "std_rec": np.std(test_recs),
"base_avg_acc": np.mean(ba_accs), "base_std_acc": np.std(ba_accs),
"base_avg_f1": np.mean(ba_f1s), "base_std_f1": np.std(ba_f1s),
"base_avg_prec": np.mean(ba_precs),
"base_std_prec": np.std(ba_precs),
"base_avg_rec": np.mean(ba_recs), "base_std_rec": np.std(ba_recs),
"results": model_results}
agg_models_results = emb_agg_results.get(emb_name, [])
agg_models_results.append(agg_model_results)
emb_agg_results[emb_name] = agg_models_results
print(
'%s acc %.3f+-%.3f f1 %.3f+-%.3f prec %.3f+-%.3f rec %.3f+-%.3f' %
(
model,
agg_model_results['avg_acc'], agg_model_results['std_acc'],
agg_model_results['avg_f1'], agg_model_results['std_f1'],
agg_model_results['avg_prec'], agg_model_results['std_prec'],
agg_model_results['avg_rec'], agg_model_results['std_rec']
)
)
def model_summary(model, name=None):
if not name:
name = model['model']
return '%s (avg_acc %.2f, avg_f1 %.2f)' % (
name, model['avg_acc'], model['avg_f1'])
if not winner_model:
winner_model = agg_model_results
elif winner_model['avg_acc'] > agg_model_results['avg_acc']:
print('Previous model %s is, on average, better than %s' %
(model_summary(winner_model),
model_summary(agg_model_results, name=model)))
else:
print('Previous model %s was, on average, worse than %s' %
(model_summary(winner_model),
model_summary(agg_model_results, name=model)))
winner_model = agg_model_results
if not winner_model:
return empty_result
def select_best_result(winner_model):
result = None
for model_result in winner_model['results']:
if not result:
result = model_result
else:
if get_test_accuracy(model_result) >= get_test_accuracy(result):
    result = model_result
return result
best_result = select_best_result(winner_model)
# only place where plotter is used, maybe better to separate
# plotting from aggregation of data
plt = plotter.plot_learning(best_result['trainer_df'],
                            best_result['test_df'],
                            winner_model['model'])
plt.show()
base_test_result = best_result['test_random_result']
row = get_testres(best_result)
result = {"rel_name": rel_name, "rel_type": rel_type,
          # assumption: the epoch count is recorded on the individual model
          # result; fall back to 0 (as in empty_result) if it is not
          "epochs": best_result.get('epochs', 0),
          "best_acc": row['acc'], "best_f1": row['f1'],
          "best_prec": row['precision'], "best_rec": row['recall'],
          "base_acc": base_test_result['acc'],
          "base_f1": base_test_result['f1'],
          "base_prec": base_test_result['precision'],
          "base_rec": base_test_result['recall'],
          "best_model": winner_model['model'],
          "best_model_type": winner_model['model'],
          "pos_exs": pos_exs}
for agg_model_results in agg_models_results:
model = agg_model_results['model']
result['%s_avg_acc' % model] = agg_model_results['avg_acc']
result['%s_std_acc' % model] = agg_model_results['std_acc']
result['%s_avg_f1' % model] = agg_model_results['avg_f1']
result['%s_std_f1' % model] = agg_model_results['std_f1']
return result
class Plotter():
def __init__(self):
print('Plotter')
self.colors = seaborn.color_palette()
def plot_learning_curve(self, df_training, model_name):
df = df_training
row_min = df.min()
row_max = df.max()
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.plot(df['step'], df['train/loss'], '-',
markersize=1, color=self.colors[0], alpha=.5,
label='train loss')
plt.plot(df['step'], df['valid/loss'], '-',
markersize=1, color=self.colors[1], alpha=.5,
label='valid loss')
plt.xlim((0, row_max['step']))
plt.ylim((min(row_min['train/loss'], row_min['valid/loss']),
max(row_max['train/loss'], row_max['valid/loss'])))
plt.xlabel('step')
plt.ylabel('loss')
plt.title('learning curve %s' % model_name)
plt.legend()
def plot_valid_acc(self, df_training, model_name):
df = df_training
# row_min = df.min()
row_max = df.max()
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.plot(df['step'], df['valid/precision'], '-',
markersize=1, color=self.colors[0], alpha=.5,
label='precision')
plt.plot(df['step'], df['valid/recall'], '-',
markersize=1, color=self.colors[1], alpha=.5,
label='recall')
plt.plot(df['step'], df['valid/acc'], '-',
markersize=1, color=self.colors[2], alpha=.5,
label='accuracy')
plt.plot(df['step'], df['valid/f1'], '-',
markersize=1, color=self.colors[3], alpha=.5,
label='f1')
plt.xlim((0, row_max['step']))
plt.ylim(0.0, 1.0)
plt.xlabel('step')
plt.ylabel('percent')
plt.legend()
plt.title('Validation results %s ' % model_name)
def plot_test_df(self, df_test, model_name):
df = expand_test_result_df(df_test)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.plot(df['threshold'], df['precision'], '-',
markersize=1, color=self.colors[0], alpha=.5,
label='precision')
plt.plot(df['threshold'], df['recall'], '-',
markersize=1, color=self.colors[1], alpha=.5,
label='recall')
plt.plot(df['threshold'], df['acc'], '-',
markersize=1, color=self.colors[2], alpha=.5,
label='accuracy')
plt.plot(df['threshold'], df['f1'], '-',
markersize=1, color=self.colors[4], alpha=.5,
label='f1')
plt.plot(df['threshold'], df['coverage'], '-',
markersize=1, color=self.colors[3], alpha=.5,
label='coverage')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.05)
plt.xlabel('threshold')
plt.ylabel('percent')
plt.legend()
plt.title('Test results %s ' % model_name)
def plot_learning(self, df_training, df_test, model_name,
n_row=2, n_col=2, figsize=(10, 6), dpi=300):
plt.figure(figsize=figsize, dpi=dpi)
# learning curve
plt.subplot(n_row, n_col, 1)
self.plot_learning_curve(df_training, model_name)
# validation p-r-acc
plt.subplot(n_row, n_col, 2)
self.plot_valid_acc(df_training, model_name)
# test p-r-acc
plt.subplot(n_row, n_col, 3)
self.plot_test_df(df_test, model_name)
fig = plt.gcf()
fig.tight_layout()
return plt
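# Illustrative sketch (not part of the original module): a small, never-called
# helper showing how Plotter.plot_learning_curve and Plotter.plot_valid_acc can
# be exercised with a synthetic training dataframe. The column names are the
# ones the methods above read; the data values are made up purely for
# demonstration. np and plt are the module-level imports already used above.
def _demo_plotter():
    import pandas as pd  # local import so the sketch stays self-contained
    steps = np.arange(0, 1000, 50)
    df_training_demo = pd.DataFrame({
        'step': steps,
        'train/loss': np.exp(-steps / 400.0),
        'valid/loss': np.exp(-steps / 500.0) + 0.05,
        'valid/precision': 1.0 - np.exp(-steps / 300.0),
        'valid/recall': 1.0 - np.exp(-steps / 350.0),
        'valid/acc': 1.0 - 0.5 * np.exp(-steps / 300.0),
        'valid/f1': 1.0 - np.exp(-steps / 320.0),
    })
    plotter = Plotter()
    plt.figure()
    plotter.plot_learning_curve(df_training_demo, 'demo-model')
    plt.figure()
    plotter.plot_valid_acc(df_training_demo, 'demo-model')
    plt.show()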
def _generate_histogram_series(df_rel, metric, agg, n_rels, learn_alg, min_val=0.4):
"""Creates and | |
0.434294481903252*log(1 +
0.08157272772815*m.x2660) + 0.434294481903252*log(1 + 0.06957939374923*m.x2661) +
0.434294481903252*log(1 + 0.2211715375748*m.x2662) + 0.434294481903252*log(1 + 0.190599795073*
m.x2663) + 0.434294481903252*log(1 + 0.1464862148544*m.x2664) + 0.434294481903252*log(1 +
0.4069493717309*m.x2665) + 0.434294481903252*log(1 + 0.08236445496461*m.x2666) +
0.434294481903252*log(1 + 0.2176954852261*m.x2667) + 0.434294481903252*log(1 + 0.4393362360224*
m.x2668) + 0.434294481903252*log(1 + 1.36741865003*m.x2669) + 0.434294481903252*log(1 +
3.35471186265*m.x2670) + 0.434294481903252*log(1 + 0.5593911096162*m.x2671) + 0.434294481903252*
log(1 + 0.3648285950059*m.x2672) + 0.434294481903252*log(1 + 1.33907556371*m.x2673) +
0.434294481903252*log(1 + 0.6641358050478*m.x2674) + 0.434294481903252*log(1 + 0.1192498259978*
m.x2675) + 0.434294481903252*log(1 + 0.690101002084*m.x2676) + 0.434294481903252*log(1 +
5.40800305531*m.x2677) + 0.434294481903252*log(1 + 0.5037762888931*m.x2678) + 0.434294481903252*
log(1 + 0.1497673627548*m.x2679) + 0.434294481903252*log(1 + 1.12917260436*m.x2680) +
0.434294481903252*log(1 + 0.1213637184725*m.x2681) + 0.434294481903252*log(1 + 0.2825966806632*
m.x2682) + 0.434294481903252*log(1 + 0.1206971332217*m.x2683) + 0.434294481903252*log(1 +
6.57832277628*m.x2684) + 0.434294481903252*log(1 + 0.9511490619748*m.x2685) + 0.434294481903252*
log(1 + 1.1209973133*m.x2686) + 0.434294481903252*log(1 + 11.22199203932*m.x2687) +
0.434294481903252*log(1 + 5.14635622913*m.x2688) + 0.434294481903252*log(1 + 0.1635068769648*
m.x2689) + 0.434294481903252*log(1 + 26.05667181658*m.x2690) + 0.434294481903252*log(1 +
1.78327218225*m.x2691) + 0.434294481903252*log(1 + 0.9217428973323*m.x2692) + 0.434294481903252*
log(1 + 0.3518226909124*m.x2693) + 0.434294481903252*log(1 + 0.2081335332514*m.x2694) +
0.434294481903252*log(1 + 0.1424956684182*m.x2695) + 0.434294481903252*log(1 + 0.1884134718502*
m.x2696) + 0.434294481903252*log(1 + 2.46806147296*m.x2697) + 0.434294481903252*log(1 +
0.4643144646646*m.x2698) + 0.434294481903252*log(1 + 1.08313846193*m.x2699) + 0.434294481903252*
log(1 + 1.68916431629*m.x2700) >= 7.5257498916)
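# 0.434294481903252 == 1/ln(10), so each 0.434294481903252*log(1 + a*x) term is log10(1 + a*x)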
m.c15 = Constraint(expr=0.434294481903252*log(1 + 127585.800118451*m.x2701) + 0.434294481903252*log(1 + 681481.644279934
*m.x2702) + 0.434294481903252*log(1 + 587428.19220859*m.x2703) + 0.434294481903252*log(1 +
360231.152750982*m.x2704) + 0.434294481903252*log(1 + 27290.659480689*m.x2705) +
0.434294481903252*log(1 + 63391.3703955907*m.x2706) + 0.434294481903252*log(1 + 139286.870307851
*m.x2707) + 0.434294481903252*log(1 + 852066.665264528*m.x2708) + 0.434294481903252*log(1 +
43747.480449771*m.x2709) + 0.434294481903252*log(1 + 134213.148899146*m.x2710) +
0.434294481903252*log(1 + 223338.352856014*m.x2711) + 0.434294481903252*log(1 + 14360.4245186601
*m.x2712) + 0.434294481903252*log(1 + 226220.079144514*m.x2713) + 0.434294481903252*log(1 +
23884.281232472*m.x2714) + 0.434294481903252*log(1 + 43213.9495418679*m.x2715) +
0.434294481903252*log(1 + 116350.064957986*m.x2716) + 0.434294481903252*log(1 + 270228.188889398
*m.x2717) + 0.434294481903252*log(1 + 45787.0475802472*m.x2718) + 0.434294481903252*log(1 +
188591.073480143*m.x2719) + 0.434294481903252*log(1 + 8012.86752921834*m.x2720) +
0.434294481903252*log(1 + 3960.47135899574*m.x2721) + 0.434294481903252*log(1 + 82791.3258014896
*m.x2722) + 0.434294481903252*log(1 + 126781.941307661*m.x2723) + 0.434294481903252*log(1 +
78272.1299737484*m.x2724) + 0.434294481903252*log(1 + 620139.642698473*m.x2725) +
0.434294481903252*log(1 + 899191.99224964*m.x2726) + 0.434294481903252*log(1 + 32939.7304148737*
m.x2727) + 0.434294481903252*log(1 + 65066.0281925357*m.x2728) + 0.434294481903252*log(1 +
1460531.57475557*m.x2729) + 0.434294481903252*log(1 + 124181.991792237*m.x2730) +
0.434294481903252*log(1 + 38866.4681433135*m.x2731) + 0.434294481903252*log(1 + 200017.891017003
*m.x2732) + 0.434294481903252*log(1 + 358414.399070475*m.x2733) + 0.434294481903252*log(1 +
159640.663903805*m.x2734) + 0.434294481903252*log(1 + 2686736.40481544*m.x2735) +
0.434294481903252*log(1 + 330542.809288823*m.x2736) + 0.434294481903252*log(1 + 204327.625537818
*m.x2737) + 0.434294481903252*log(1 + 428423.753464976*m.x2738) + 0.434294481903252*log(1 +
198802.397637941*m.x2739) + 0.434294481903252*log(1 + 728.47840592711*m.x2740) +
0.434294481903252*log(1 + 58278.0492011947*m.x2741) + 0.434294481903252*log(1 + 233742.725152348
*m.x2742) + 0.434294481903252*log(1 + 23342.0452788385*m.x2743) + 0.434294481903252*log(1 +
1127667.80422964*m.x2744) + 0.434294481903252*log(1 + 871972.584043438*m.x2745) +
0.434294481903252*log(1 + 368708.23806541*m.x2746) + 0.434294481903252*log(1 + 1400635.98161787*
m.x2747) + 0.434294481903252*log(1 + 83643.5691666665*m.x2748) + 0.434294481903252*log(1 +
70942.5123387862*m.x2749) + 0.434294481903252*log(1 + 8405.07157968976*m.x2750) +
0.434294481903252*log(1 + 5602.56752553276*m.x2751) + 0.434294481903252*log(1 + 255694.87558902*
m.x2752) + 0.434294481903252*log(1 + 123160.906587273*m.x2753) + 0.434294481903252*log(1 +
246590.02382166*m.x2754) + 0.434294481903252*log(1 + 461771.961012066*m.x2755) +
0.434294481903252*log(1 + 160827.129061381*m.x2756) + 0.434294481903252*log(1 + 309085.550923916
*m.x2757) + 0.434294481903252*log(1 + 239338.843341839*m.x2758) + 0.434294481903252*log(1 +
520165.41221464*m.x2759) + 0.434294481903252*log(1 + 5577516.53557818*m.x2760) +
0.434294481903252*log(1 + 357345.587865392*m.x2761) + 0.434294481903252*log(1 + 77251.7456658075
*m.x2762) + 0.434294481903252*log(1 + 814125.642970545*m.x2763) + 0.434294481903252*log(1 +
326621.261621797*m.x2764) + 0.434294481903252*log(1 + 42937.7691722905*m.x2765) +
0.434294481903252*log(1 + 63057.9059644848*m.x2766) + 0.434294481903252*log(1 + 144948.453373736
*m.x2767) + 0.434294481903252*log(1 + 23280.245403646*m.x2768) + 0.434294481903252*log(1 +
10091.5270529186*m.x2769) + 0.434294481903252*log(1 + 6000.15467193638*m.x2770) +
0.434294481903252*log(1 + 55358.095057159*m.x2771) + 0.434294481903252*log(1 + 175822.569253317*
m.x2772) + 0.434294481903252*log(1 + 174611.967978819*m.x2773) + 0.434294481903252*log(1 +
9610.34267494579*m.x2774) + 0.434294481903252*log(1 + 2054329.04019135*m.x2775) +
0.434294481903252*log(1 + 305503.523669148*m.x2776) + 0.434294481903252*log(1 + 31579.4741020547
*m.x2777) + 0.434294481903252*log(1 + 292773.909388192*m.x2778) + 0.434294481903252*log(1 +
195761.309221742*m.x2779) + 0.434294481903252*log(1 + 10817.9708446616*m.x2780) +
0.434294481903252*log(1 + 233548.041250569*m.x2781) + 0.434294481903252*log(1 + 80634.7257255015
*m.x2782) + 0.434294481903252*log(1 + 104930.299237506*m.x2783) + 0.434294481903252*log(1 +
206314.262926782*m.x2784) + 0.434294481903252*log(1 + 61080.8080357812*m.x2785) +
0.434294481903252*log(1 + 37867.1544237133*m.x2786) + 0.434294481903252*log(1 + 221757.65154874*
m.x2787) + 0.434294481903252*log(1 + 18396.1912735746*m.x2788) + 0.434294481903252*log(1 +
429856.015402877*m.x2789) + 0.434294481903252*log(1 + 363890.507910455*m.x2790) +
0.434294481903252*log(1 + 1005079.77758155*m.x2791) + 0.434294481903252*log(1 + 25818.6107517883
*m.x2792) + 0.434294481903252*log(1 + 60552.88274003*m.x2793) + 0.434294481903252*log(1 +
18930.7538260103*m.x2794) + 0.434294481903252*log(1 + 487969.507427098*m.x2795) +
0.434294481903252*log(1 + 37735.9715139204*m.x2796) + 0.434294481903252*log(1 + 22550.3511980749
*m.x2797) + 0.434294481903252*log(1 + 182954.264171678*m.x2798) + 0.434294481903252*log(1 +
1239539.24845071*m.x2799) + 0.434294481903252*log(1 + 1868012.12170319*m.x2800) +
0.434294481903252*log(1 + 50055.6989926287*m.x2801) + 0.434294481903252*log(1 + 100648.955094578
*m.x2802) + 0.434294481903252*log(1 + 153133.690132207*m.x2803) + 0.434294481903252*log(1 +
517409.667484786*m.x2804) + 0.434294481903252*log(1 + 658012.279919179*m.x2805) +
0.434294481903252*log(1 + 117495.178780339*m.x2806) + 0.434294481903252*log(1 + 461174.782566179
*m.x2807) + 0.434294481903252*log(1 + 866244.605881933*m.x2808) + 0.434294481903252*log(1 +
143303.36994073*m.x2809) + 0.434294481903252*log(1 + 354720.507214598*m.x2810) +
0.434294481903252*log(1 + 125414.817188458*m.x2811) + 0.434294481903252*log(1 + 68218.8980672165
*m.x2812) + 0.434294481903252*log(1 + 36606.4666743813*m.x2813) + 0.434294481903252*log(1 +
201662.68086507*m.x2814) + 0.434294481903252*log(1 + 42842.239618539*m.x2815) +
0.434294481903252*log(1 + 1961282.59355746*m.x2816) + 0.434294481903252*log(1 + 108426.561225503
*m.x2817) + 0.434294481903252*log(1 + 418084.797392811*m.x2818) + 0.434294481903252*log(1 +
80141.8873384253*m.x2819) + 0.434294481903252*log(1 + 343068.758238575*m.x2820) +
0.434294481903252*log(1 + 137346.445442479*m.x2821) + 0.434294481903252*log(1 + 966086.740325497
*m.x2822) + 0.434294481903252*log(1 + 23216.9451471539*m.x2823) + 0.434294481903252*log(1 +
535508.525935268*m.x2824) + 0.434294481903252*log(1 + 10665.710701586*m.x2825) +
0.434294481903252*log(1 + 92343.8119126466*m.x2826) + 0.434294481903252*log(1 + 208281.264738953
*m.x2827) + 0.434294481903252*log(1 + 164592.456502292*m.x2828) + 0.434294481903252*log(1 +
822068.756527917*m.x2829) + 0.434294481903252*log(1 + 250902.443342594*m.x2830) +
0.434294481903252*log(1 + 80290.3299099562*m.x2831) + 0.434294481903252*log(1 + 72946.4102121022
*m.x2832) + 0.434294481903252*log(1 + 18286.428182422*m.x2833) + 0.434294481903252*log(1 +
22077.7314339429*m.x2834) + 0.434294481903252*log(1 + 382.14859662873*m.x2835) +
0.434294481903252*log(1 + 202457.65966952*m.x2836) + 0.434294481903252*log(1 + 173613.033549638*
m.x2837) + 0.434294481903252*log(1 + 59601.8865907926*m.x2838) + 0.434294481903252*log(1 +
75592.3976647284*m.x2839) + 0.434294481903252*log(1 + 41444.2023158196*m.x2840) +
0.434294481903252*log(1 + 77764.3092323783*m.x2841) + 0.434294481903252*log(1 + 116873.499976211
*m.x2842) + 0.434294481903252*log(1 + 665814.280747214*m.x2843) + 0.434294481903252*log(1 +
309801.684051746*m.x2844) + 0.434294481903252*log(1 + 84003.2472903314*m.x2845) +
0.434294481903252*log(1 + 417206.267809332*m.x2846) + 0.434294481903252*log(1 + 824605.316370735
*m.x2847) + 0.434294481903252*log(1 + 27806.7462681524*m.x2848) + 0.434294481903252*log(1 +
49822.6820243827*m.x2849) + 0.434294481903252*log(1 + 500981.842078106*m.x2850) +
0.434294481903252*log(1 + 81042.8869442811*m.x2851) + 0.434294481903252*log(1 + 57456.1528019387
*m.x2852) + 0.434294481903252*log(1 + 123843.169912822*m.x2853) + 0.434294481903252*log(1 +
545888.321272684*m.x2854) + 0.434294481903252*log(1 + 355193.012017175*m.x2855) +
0.434294481903252*log(1 + 43198.2992950355*m.x2856) + 0.434294481903252*log(1 + 385733.943315301
*m.x2857) + 0.434294481903252*log(1 + 27733.7911811804*m.x2858) + 0.434294481903252*log(1 +
132143.368392514*m.x2859) + 0.434294481903252*log(1 + 714794.401563982*m.x2860) +
0.434294481903252*log(1 + 138032.053813804*m.x2861) + 0.434294481903252*log(1 + 603412.514933511
*m.x2862) + 0.434294481903252*log(1 + 324752.183861905*m.x2863) + 0.434294481903252*log(1 +
150906.850571571*m.x2864) + 0.434294481903252*log(1 + 133117.37667819*m.x2865) +
0.434294481903252*log(1 + 282760.45328616*m.x2866) + 0.434294481903252*log(1 + 274187.115684062*
m.x2867) + 0.434294481903252*log(1 + 49283.6960055476*m.x2868) + 0.434294481903252*log(1 +
352084.177625663*m.x2869) + 0.434294481903252*log(1 + 395366.802958047*m.x2870) +
0.434294481903252*log(1 + 94491.8155889039*m.x2871) + 0.434294481903252*log(1 + 889042.071750808
*m.x2872) + 0.434294481903252*log(1 + 221774.58085847*m.x2873) + 0.434294481903252*log(1 +
10520.3829066665*m.x2874) + 0.434294481903252*log(1 + 341557.82676441*m.x2875) +
0.434294481903252*log(1 + 6417.98408007412*m.x2876) + 0.434294481903252*log(1 + 88963.8997359288
*m.x2877) + 0.434294481903252*log(1 + 27530.4614411581*m.x2878) + 0.434294481903252*log(1 +
68474.6213149699*m.x2879) + 0.434294481903252*log(1 + 211858.422580226*m.x2880) +
0.434294481903252*log(1 + 472765.448741286*m.x2881) + 0.434294481903252*log(1 + 4700.77595866071
*m.x2882) + 0.434294481903252*log(1 + 176725.657077515*m.x2883) + 0.434294481903252*log(1 +
28952.1487807376*m.x2884) + 0.434294481903252*log(1 + 1786653.7849845*m.x2885) +
0.434294481903252*log(1 + 26846.0391789491*m.x2886) + 0.434294481903252*log(1 + 153737.714172163
*m.x2887) + 0.434294481903252*log(1 + 83236.7289453122*m.x2888) + 0.434294481903252*log(1 +
1377369.14183994*m.x2889) + 0.434294481903252*log(1 + 185127.15294154*m.x2890) +
0.434294481903252*log(1 + 18793.8044902648*m.x2891) + 0.434294481903252*log(1 + 39127.496823418*
m.x2892) + 0.434294481903252*log(1 + 3844218.92842541*m.x2893) + 0.434294481903252*log(1 +
674391.696711789*m.x2894) + 0.434294481903252*log(1 + 110953.150097786*m.x2895) +
0.434294481903252*log(1 + 623528.661429016*m.x2896) + 0.434294481903252*log(1 + 61176.180433679*
m.x2897) + 0.434294481903252*log(1 + 81404.6680394227*m.x2898) + 0.434294481903252*log(1 +
348427.342733275*m.x2899) + 0.434294481903252*log(1 + 484110.852828756*m.x2900) +
0.434294481903252*log(1 + 497793.972491808*m.x2901) + 0.434294481903252*log(1 + 66452.7693741254
*m.x2902) + 0.434294481903252*log(1 + 124864.508568708*m.x2903) + 0.434294481903252*log(1 +
31986.4667640848*m.x2904) + 0.434294481903252*log(1 + 19514.0724917737*m.x2905) +
0.434294481903252*log(1 + 356909.689100181*m.x2906) + 0.434294481903252*log(1 + 121518.365473318
*m.x2907) + 0.434294481903252*log(1 + 27268.342024951*m.x2908) + 0.434294481903252*log(1 +
226202.424267342*m.x2909) + 0.434294481903252*log(1 + 72719.1984901939*m.x2910) +
0.434294481903252*log(1 + 2948593.67958428*m.x2911) + 0.434294481903252*log(1 + 461491.442260071
*m.x2912) + 0.434294481903252*log(1 + 118274.574237657*m.x2913) + 0.434294481903252*log(1 +
2727252.20101762*m.x2914) + 0.434294481903252*log(1 + 158432.086768717*m.x2915) +
0.434294481903252*log(1 + 54356.4970954898*m.x2916) + 0.434294481903252*log(1 + 224932.264922018
*m.x2917) + 0.434294481903252*log(1 + 679875.424507976*m.x2918) + 0.434294481903252*log(1 +
52187.0131774279*m.x2919) + 0.434294481903252*log(1 + 76620.7956727285*m.x2920) +
0.434294481903252*log(1 + 18736.6098653525*m.x2921) + 0.434294481903252*log(1 + 202005.136263659
*m.x2922) + 0.434294481903252*log(1 + 217453.072915785*m.x2923) + 0.434294481903252*log(1 +
450.12689610136*m.x2924) + 0.434294481903252*log(1 + 1232575.93033705*m.x2925) +
0.434294481903252*log(1 + 179817.236768535*m.x2926) + 0.434294481903252*log(1 + 899528.764510352
*m.x2927) + 0.434294481903252*log(1 + 6023.94160784727*m.x2928) + 0.434294481903252*log(1 +
457630.803261599*m.x2929) + 0.434294481903252*log(1 + 951026.337320394*m.x2930) +
0.434294481903252*log(1 + 2501904.31867467*m.x2931) + 0.434294481903252*log(1 + 117397.565413988
*m.x2932) + 0.434294481903252*log(1 + 262108.541881598*m.x2933) + 0.434294481903252*log(1 +
111002.797997961*m.x2934) + 0.434294481903252*log(1 + 129137.862731217*m.x2935) +
0.434294481903252*log(1 + 22004.9822975039*m.x2936) + 0.434294481903252*log(1 + 475904.959707362
*m.x2937) +
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 4 15:23:46 2018
@author: Jared
"""
from collections import Counter
import pymongo
import pandas as pd
from ast import literal_eval
from ml.elements import *
#import machineLearner as ml #get rid of if doing oqmd database
#from qmpy import * #use python 2.7!
from matplotlib import pyplot as plt
import math
#import mysql.connector
import numpy as np
from pandas.plotting import scatter_matrix
import matplotlib.patches as mpatches
import matplotlib
import matplotlib.gridspec as gridspec
# ENERGY OF FORMATION
# dH = E_total - sum_i(x_i * E_i), where x_i is the number of atoms of element i
# STABILITY (E_HULL)
# dH_stab = dH - dH_hull
# dH_hull (eV/atom), but calculated a different way than our energy of formation
# We need
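# A minimal sketch of the formation-energy formula above (illustrative only;
# the real calculation lives in getDeltaH_formation / getMuDFT, defined
# elsewhere). Dividing by the atom count gives eV/atom, matching the plots below.
def _formation_energy_per_atom(total_energy, atom_counts, mu):
    """dH = (E_total - sum_i x_i * mu_i) / N_atoms, mu_i being the per-atom reference energy."""
    n_atoms = sum(atom_counts.values())
    reference = sum(x * mu[element] for element, x in atom_counts.items())
    return (total_energy - reference) / n_atoms
# e.g. _formation_energy_per_atom(E_total, {'Cs': 1, 'Sn': 1, 'Br': 3}, mu_dict)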
# Access syntax for direct access to the DB
'''
cnx = mysql.connector.connect(user='root', password='<PASSWORD>',
host='127.0.0.1',
database='qmpy_jared')
cursor = cnx.cursor()
cursor.execute("USE qmpy_jared;")
cursor.close()
cnx.close()
'''
# DEFINITIONS FOR OQMD DATA
'''
space = 'Cs-Sn-Br'
comp = 'CsSnBr3'
space = PhaseSpace(space)
energy, phase = space.gclp(comp)
compute_stability
print(energy, phase)
'''
def main():
matplotlib.rcParams.update({'font.size': 15.5})
# QUICK LOAD TO AVOID CALCULATION
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
deltaH_qmpy = pd.read_csv(path + 'compEnergy_qmdb_d3.csv')
print('qmpy ', len(deltaH_qmpy))
mng_client = pymongo.MongoClient('localhost', 27017)
db = mng_client['perovskites']
# GET AGGREGATED CRYSTAL DATA FROM MONGODB
df = pd.DataFrame(list(db['qw_outputs_aggregated'].find()))
#df = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/Data/crystalDB3/aggregated_features_14092018.csv')
df_features = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/Data/featureDB2/d2_paper_24102018.csv')
'''
plt.ylabel('$E_{gap}$ (eV)')
plt.xlabel('Iodine Mixing Fraction')
plt.title('Iodine Bandgap Trend')
s = 'fracI'
s2 = 'dir_gap'
y_cl = df_features.groupby([s])[s2].mean()
x_cl = np.array([i for i in y_cl.index])
y_cl = y_cl.values
plt.scatter(df_features[s], df_features[s2], alpha = 0.2)
p1, = plt.plot(x_cl, y_cl, linestyle = '-', lw = 2, label = 'D$_{3}$')
ax1 = plt.axes()
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
#plt.legend(handles = [p1])
plt.tight_layout()
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
plt.savefig(path + 'dummyTrend_realI.png', dpi = 400, bbox_inches="tight")
plt.show()
'''
#df = df.dropna(axis = 0)
dff = df.drop(df[df['nIterations'] >= 201].index).copy()
dff = dff.drop(df[df['crystal_id'] == 1526850748].index).copy()
df = dff.drop(df[df['crystal_id'] == 1526752626].index).copy()
print('here', len(df))
#deltaH_qmdb = getCrystalOQMDData(df)
# MY CALCULATED FORMATION ENERGY
mu = getMuCorrectedDFT2()
deltaH2_formation = getDeltaH_formation(df, mu)
mu = getMuDFT()
deltaH_formation = getDeltaH_formation(df, mu)
#df_delta = pd.DataFrame(deltaH_formation, columns = 'dH_formation')
#deltaH_formation.to_csv('/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/df_formation.csv')
#plotDeltaH_formation(list(deltaH_formation['deltaH_formation']))
# GEOMETRIC FORMATION ENERGY (BASED ON FIT)
#deltaH_geo = getDeltaH_geo(df)
#deltaH_geo.to_csv('/Users/Jared/Dropbox/Master Thesis/' +
# 'code/codeOutputs/deltaH_geo.csv')
deltaH_geo = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/' +
'code/codeOutputs/deltaH_geo.csv')
print('geo', len(deltaH_geo))
#plotDeltaH_geo(list(deltaH_geo['deltaH_geo']))
# comparison of geometric approach fidelity
'''
plt.plot(deltaH_geo['descriptor'], deltaH['deltaH'], 'o')
plt.xlabel('$(t + \mu)^{\eta}$')
plt.ylabel('$\Delta H_{f}$ (eV/atom)')
plt.title('Formation Energy vs. Geometric Factor')
plt.show()
'''
#error associated with SG15 basis set
#delta = ((10.78 + 8.19 + 7.69 + 0.19)*(4/20) +
# (4.35 + 8.07)*(4/20) +
# (1.9 + 6.03 + 5.53)*(8/20))
# MERGE ALL DATA
result = pd.merge(deltaH_formation, deltaH_qmpy, on=['crystal_id'])
result = pd.merge(result, deltaH_geo, on=['crystal_id'])
result= pd.merge(result, df_features, on=['crystal_id'])
result_corrected = pd.merge(deltaH2_formation, deltaH_qmpy, on=['crystal_id'])
result_corrected = pd.merge(result_corrected, deltaH_geo, on=['crystal_id'])
result_corrected = pd.merge(result_corrected, df_features, on=['crystal_id'])
sresult = result_corrected
'''
result = result[result.crystal_id != 1519471915]
result = result[result.crystal_id != 1519608323]
result = result[result.crystal_id != 1519429441]
result = result[result.crystal_id != 1520265350]
result = result[result.crystal_id != 1520268226]
result = result[result.crystal_id != 1520334800]
result = result[result.crystal_id != 1520343157]
result = result[result.crystal_id != 1520349833]
result = result[result.crystal_id != 1520411007]
result = result[result.crystal_id != 1520429554]
result = result[result.crystal_id != 1520442584]
result = result[result.crystal_id != 1520483780]
'''
# big plot
my_dpi = 500
fig = plt.figure(figsize=(5, 5), dpi=my_dpi)
m = np.array((list(result['deltaH_formation'] - result['deltaH_hull'])))
m = m.mean()
m = 0.150 # hull-distance reference line (150 meV)
ymin = 1.12*min(result['deltaH_hull']) if min(result['deltaH_hull']) <=0 else 0.88*min(result['deltaH_hull'])
ymax = 1.12*max(result['deltaH_hull']) if max(result['deltaH_hull']) >=0 else 0.88*max(result['deltaH_hull'])
xmax = ymax
plt.ylim(ymin, ymax)
plt.xlim(ymin, xmax)
xy = [min(result['deltaH_hull']), max(result['deltaH_hull'])]
xy = [ymin, ymax]
p1, = plt.plot(xy, xy, color = 'k', label = '$E_{hull}$')
p0c, = plt.plot(result['deltaH_hull'],
result_corrected['deltaH_formation'], 'o',
alpha = 0.5, color = 'r', label = '$\mu_{corrected}$')
p0, = plt.plot(result['deltaH_hull'],
result['deltaH_formation'], 'o',
alpha = 0.5, label = '$\mu$')
#p1, = plt.plot(xy, xy, color = 'k', label = '$E_{hull}$')
#xy = [min(result['deltaH_hull']), max(result['deltaH_hull'])]
#p2, = plt.plot(xy, [i + m for i in xy], alpha = 1.0,
# color = 'k',
# label = '$\Delta E_{hull}$ = 100 meV',
# linestyle = '--', linewidth = 3.0)
plt.xlabel('$\Delta H_{f, OQMD}$ (eV/atom)')
plt.ylabel('$\Delta H_{f}$ (eV/atom)')
plt.title('Convex Hull Distance', y = 1.04)
plt.legend(handles = [p0c, p0, p1])
ax1 = plt.axes()
ax1.xaxis.set_major_locator(plt.MaxNLocator(6))
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
plt.savefig(path + 'paper_oqmdb_new1.png', dpi=400, bbox_inches="tight")
plt.show()
'''
# hist plot
c, d, e = plt.hist(list(result['deltaH_formation'] - result['deltaH_hull']), bins = 21)
plt.setp(e, edgecolor='w', lw=1, alpha = 0.7)
#plt.title('Stability of ' + str(len(result)) + ' Compounds')
#plt.xlabel('$E_{hull}$ distance (eV)')
#plt.ylabel('Count')
c, d, e = plt.hist(
list(result_corrected['deltaH_formation'] -
result['deltaH_hull']), bins = 21, color = 'r')
plt.setp(e, edgecolor='w', lw=1, alpha = 0.7)
plt.title('D$_{3}$ Hull Distance')
plt.xlabel('$\Delta E_{hull}$ (eV)')
plt.ylabel('Count')
ax1 = plt.axes()
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
plt.savefig(path + 'oqmdb_new1.png', dpi=400, bbox_inches="tight")
plt.show()
'''
#sresult = result_corrected.copy() #result_corrected[['fracCl','fracBr',
# 'fracI', 'fracCs',
#'fracRb', 'fracNa',
#'fracK', 'fracSn',
# 'fracGe', 'deltaH_hull']]
#plt.scatter(result['fracCl'], result['deltaH_hull'])
#print(sresult['t'])
print(len(sresult))
#
#
# lattice validity
t1 = 2*(sresult['lb'].values)/(sresult['la'].values)
t2 = 2*(sresult['lb'].values)/(sresult['lc'].values)
'''
blue_patch = mpatches.Patch(color='blue', label='2*lb/la')
red_patch = mpatches.Patch(color='red', label='2*lb/lc')
c2, d2, e2 = plt.hist(t1, bins = 21, color = 'b')
plt.setp(e2, edgecolor='w', lw=1, alpha = 0.7)
c1, d1, e1 = plt.hist(t2, bins = 21, color = 'r')
plt.setp(e1, edgecolor='w', lw=1, alpha = 0.7)
plt.legend(handles=[blue_patch, red_patch])
plt.title('D$_{3}$ Perovskite Validity')
plt.xlabel('Lattice Vector Ratio')
plt.ylabel('Count')
plt.show()
'''
sresult['hullDistance'] = list(result_corrected['deltaH_formation'] -
result_corrected['deltaH_hull'])
sresult['deltaH_formation'] = list(result_corrected['deltaH_formation'])
'''
#
#
# goldshmitd vs dhhull
plt.scatter(sresult['t'].values, sresult['hullDistance'].values)
plt.show()
#
#
# goldschmidt validity
#plt.hist(sresult['t'].values)
c1, d1, e1 = plt.hist(sresult['t'].values, bins = 21)
plt.setp(e1, edgecolor='w', lw=1)
plt.title('D$_{3}$ Perovskite Validity')
plt.xlabel('Goldschmidt Tolerance Factor')
plt.ylabel('Count')
plt.show()
'''
plt.ylabel('$\Delta E_{hull}$ (eV)')
plt.xlabel('Sodium Mixing Fraction')
plt.title('Sodium $\Delta E_{hull}$ Trend')
s = 'fracNa'
s2 = 'hullDistance'
y_cl = sresult.groupby([s])[s2].mean()
x_cl = np.array([i for i in y_cl.index])
y_cl = y_cl.values
plt.scatter(sresult[s], sresult[s2], alpha = 0.2)
plt.plot(x_cl, y_cl, linestyle = '-', lw = 2, label = 'D$_{3}$')
ax1 = plt.axes()
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
ax1.xaxis.set_major_locator(plt.MaxNLocator(6))
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
#plt.legend(handles = [p1])
plt.tight_layout()
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
plt.savefig(path + 'dummyTrend_realNa.png', dpi = 400, bbox_inches="tight")
plt.show()
return  # stop main() here; the code below computes additional per-element trends
# run each of these with d3 data
s = 'dir_gap'
sname = '$E_{gap}$ (eV)'
fname = 'eGap'
'''
s = 'deltaH_hull'
s = 'hullDistance'
sname = '$\Delta E_{hull}$ (eV)'
fname = 'dh_hull'
s = 'deltaH_formation'
sname = '$\Delta H_{f}$ (eV/atom)'
fname = 'dh_form'
# goldschmidt
s = 't' #'dir_gap'
sname = '$t$'
fname = 'gold'
#lattice
sresult['t1'] = t2
s = 't1' #'dir_gap'
sname = '2*lb/la'
fname = '2lbla'
'''
glist = [g for g in sresult.groupby(['fracCl'])[s]]
print(type(sresult[s].values[0]))
y_cl = sresult.groupby(['fracCl'])[s].mean()
y_cl_sd = sresult.groupby(['fracCl'])[s].std()
x_cl = np.array([i for i in y_cl.index])
y_cl = y_cl.values
y_br = sresult.groupby(['fracBr'])[s].mean()
y_br_sd = sresult.groupby(['fracBr'])[s].std()
x_br = np.array([i for i in y_br.index])
y_br = y_br.values
y_i = sresult.groupby(['fracI'])[s].mean()
y_i_sd = sresult.groupby(['fracI'])[s].std()
x_i = np.array([i for i in y_i.index])
y_i = y_i.values
y_cs = sresult.groupby(['fracCs'])[s].mean()
y_cs_sd = sresult.groupby(['fracCs'])[s].std()
x_cs = np.array([i for i in y_cs.index])
y_cs = y_cs.values
y_rb = sresult.groupby(['fracRb'])[s].mean()
y_rb_sd = sresult.groupby(['fracRb'])[s].std()
x_rb = np.array([i for i in y_rb.index])
y_rb = y_rb.values
y_na = sresult.groupby(['fracNa'])[s].mean()
y_na_sd = sresult.groupby(['fracNa'])[s].std()
x_na = np.array([i for i in y_na.index])
y_na = y_na.values
y_k = sresult.groupby(['fracK'])[s].mean()
y_k_sd = sresult.groupby(['fracK'])[s].std()
x_k = np.array([i for i in y_k.index])
y_k = y_k.values
y_sn = sresult.groupby(['fracSn'])[s].mean()
y_sn_sd = sresult.groupby(['fracSn'])[s].std()
x_sn = np.array([i for i in y_sn.index])
y_sn = y_sn.values
y_ge = sresult.groupby(['fracGe'])[s].mean()
y_ge_sd = sresult.groupby(['fracGe'])[s].std()
x_ge = np.array([i for i in y_ge.index])
y_ge = y_ge.values
from django.db import models
from django.urls import reverse_lazy
from .informacion_personal import(Declaraciones, Domicilios, Observaciones,
InfoPersonalVar)
from .catalogos import (CatTiposInmuebles, CatTiposTitulares,
CatFormasAdquisiciones, CatSectoresIndustria,
CatMonedas, CatTiposOperaciones, CatTiposMuebles,
CatPaises, CatEntidadesFederativas,
CatTiposEspecificosInversiones, CatTiposInversiones,
CatTiposMetales, CatTiposFideicomisos,
CatTiposRelacionesPersonales, CatUnidadesTemporales, CatActivoBien,
CatTipoParticipacion, CatEntesPublicos)
class ActivosBienes(models.Model):
BIENES_INMUEBLES = 1
BIENES_INTANGIBLES = 2
BIENES_MUEBLES = 3
MUEBLES_NO_REGISTRABLES = 4
FIDEICOMISOS = 5
CUENTAS_POR_COBRAR = 6
declaraciones = models.ForeignKey(Declaraciones, on_delete=models.DO_NOTHING)
id_activobien = models.IntegerField(null=True)
cat_activo_bien = models.ForeignKey(CatActivoBien, on_delete=models.DO_NOTHING, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class BienesPersonas(models.Model):
COVENDEDOR = 1
COPROPIETARIO = 2
FIDEICOMITENTE = 3
FIDEICOMISARIO = 4
FIDUCIARIO = 5
PRESTATARIO_O_DEUDOR = 6
DECLARANTE = 7
COPROPIETARIO = 8  # NOTE: shadows COPROPIETARIO = 2 above; a distinct role name was likely intended
PROPIETARIO_ANTERIOR = 9
info_personal_var = models.ForeignKey(InfoPersonalVar, on_delete=models.DO_NOTHING, related_name="bienes_personas_info_personal_var")
activos_bienes = models.ForeignKey(ActivosBienes, on_delete=models.DO_NOTHING)
porcentaje = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
es_propietario = models.BooleanField(blank=True, null=True, default=None)
precio_adquision = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
el_adquirio = models.BooleanField(blank=True, null=True, default=None)
cat_tipo_participacion = models.ForeignKey(CatTipoParticipacion, on_delete=models.DO_NOTHING)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
tipo_relacion = models.ForeignKey(CatTiposRelacionesPersonales, on_delete=models.DO_NOTHING, blank=True, null=True)
otra_relacion = models.CharField(max_length=255, blank=True, null=True)
otra_relacion_familiar = models.CharField(max_length=255, blank=True)
otra_persona = models.ForeignKey(InfoPersonalVar, on_delete=models.DO_NOTHING, blank=True, null=True, related_name="bienes_personas_otra_persona")
def tipo(self):
return self.cat_tipo_participacion_id
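# Catalog entries flagged with `default` appear to represent the "other/specify"
# option, in which case the free-text field (otra_relacion, otro_titular, ...)
# is appended to the label; this accessor pattern repeats throughout the models below.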
def relacion(self):
try:
if self.tipo_relacion.default:
return u"{} {}".format(self.tipo_relacion,
self.otra_relacion)
else:
return u"{}".format(self.tipo_relacion)
except Exception as e:
return u""
class BienesInmuebles(models.Model):
superficie_terreno = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
superficie_construccion = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
otro_titular = models.CharField(max_length=255, blank=True)
num_escritura_publica = models.CharField(max_length=255, blank=True)
num_registro_publico = models.CharField(max_length=255, blank=True)
folio_real = models.CharField(max_length=255, blank=True)
fecha_contrato_compra = models.DateField(null=True, blank=True)
otra_forma = models.CharField(max_length=255, blank=True)
fecha_adquisicion = models.DateField(null=True, blank=True)
precio_adquisicion = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
valor_catastral = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
cat_formas_adquisiciones = models.ForeignKey(CatFormasAdquisiciones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_monedas = models.ForeignKey(CatMonedas, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_inmuebles = models.ForeignKey(CatTiposInmuebles, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_operaciones = models.ForeignKey(CatTiposOperaciones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_titulares = models.ForeignKey(CatTiposTitulares, on_delete=models.DO_NOTHING, null=True, blank=True)
declaraciones = models.ForeignKey(Declaraciones, on_delete=models.DO_NOTHING)
domicilios = models.ForeignKey(Domicilios, on_delete=models.DO_NOTHING)
observaciones = models.ForeignKey(Observaciones, on_delete=models.DO_NOTHING)
activos_bienes = models.ForeignKey(ActivosBienes, on_delete=models.DO_NOTHING)
otra_operacion = models.CharField(max_length=255, blank=True, null=True)
otro_inmueble = models.CharField(max_length=255, blank=True, null=True)
def persona(self):
try:
return BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.COPROPIETARIO).first()
except Exception as e:
return None
def copropietario(self):
try:
return BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.COPROPIETARIO)
except Exception as e:
return None
def declarante(self):
try:
return [BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.DECLARANTE).first().info_personal_var]
except Exception as e:
return None
def propierario_anterior(self):
try:
return BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.PROPIETARIO_ANTERIOR)
except Exception as e:
return None
def observacion(self):
return [self.observaciones]
def columna_uno(self):
if self.cat_tipos_operaciones:
return u"{}".format(self.cat_tipos_operaciones)
else:
return u""
def columna_dos(self):
if self.cat_formas_adquisiciones:
return u"{}".format(self.cat_formas_adquisiciones)
else:
return u""
def columna_tres(self):
if self.cat_tipos_titulares:
return u"{}".format(self.cat_tipos_titulares)
else:
return u""
def url_editar(self):
return reverse_lazy('declaracion:bienes-inmuebles-editar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_borrar(self):
return reverse_lazy('declaracion:bienes-inmuebles-borrar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_todos(self):
return reverse_lazy('declaracion:bienes-inmuebles',
kwargs={'folio': self.declaraciones.folio})
def tipo_operacion(self):
try:
if self.cat_tipos_operaciones.default:
return u"{} {}".format(self.cat_tipos_operaciones,
self.otra_operacion)
else:
return u"{}".format(self.cat_tipos_operaciones)
except Exception as e:
return u""
def tipo_inmueble(self):
try:
if self.cat_tipos_inmuebles.default:
return u"{} {}".format(self.cat_tipos_inmuebles,
self.otro_inmueble)
else:
return u"{}".format(self.cat_tipos_inmuebles)
except Exception as e:
return u""
def titular(self):
try:
if self.cat_tipos_titulares.default:
return u"{} {}".format(self.cat_tipos_titulares,
self.otro_titular)
else:
return u"{}".format(self.cat_tipos_titulares)
except Exception as e:
return u""
def forma_adquisicion(self):
try:
if self.cat_formas_adquisiciones.default:
return u"{} {}".format(self.cat_formas_adquisiciones,
self.otra_forma)
else:
return u"{}".format(self.cat_formas_adquisiciones)
except Exception as e:
return u""
class BienesMuebles(models.Model):
otra_operacion = models.CharField(max_length=255, blank=True)
otro_tipo_mueble = models.CharField(max_length=255, blank=True)
marca = models.CharField(max_length=255, blank=True)
submarca = models.CharField(max_length=255, blank=True)
modelo = models.IntegerField(blank=True, null=True)
num_serie = models.CharField(max_length=255, blank=True)
otro_titular = models.CharField(max_length=255, blank=True)
num_registro_vehicular = models.CharField(max_length=255, blank=True)
otra_forma = models.CharField(max_length=255, blank=True)
otro_sector = models.CharField(max_length=255, blank=True)
fecha_adquisicion = models.DateField(null=True, blank=True)
precio_adquisicion = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
cat_entidades_federativas = models.ForeignKey(CatEntidadesFederativas, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_formas_adquisiciones = models.ForeignKey(CatFormasAdquisiciones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_monedas = models.ForeignKey(CatMonedas, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_paises = models.ForeignKey(CatPaises, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_muebles = models.ForeignKey(CatTiposMuebles, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_operaciones = models.ForeignKey(CatTiposOperaciones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_titulares = models.ForeignKey(CatTiposTitulares, on_delete=models.DO_NOTHING, null=True, blank=True)
declaraciones = models.ForeignKey(Declaraciones, on_delete=models.DO_NOTHING)
observaciones = models.ForeignKey(Observaciones, on_delete=models.DO_NOTHING)
activos_bienes = models.ForeignKey(ActivosBienes, on_delete=models.DO_NOTHING)
def declarante(self):
try:
return [BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.DECLARANTE).first().info_personal_var]
except Exception as e:
return None
def copropietario(self):
try:
return BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.COPROPIETARIO)
except Exception as e:
print(e)
return None
def propierario_anterior(self):
try:
return BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.PROPIETARIO_ANTERIOR)
except Exception as e:
return None
def observacion(self):
return [self.observaciones]
def columna_uno(self):
if self.cat_tipos_operaciones:
return u"{}".format(self.cat_tipos_operaciones)
else:
return u""
def columna_dos(self):
if self.cat_formas_adquisiciones:
return u"{}".format(self.cat_formas_adquisiciones)
else:
return u""
def columna_tres(self):
if self.cat_tipos_titulares:
return u"{}".format(self.cat_tipos_titulares)
else:
return u""
def url_editar(self):
return reverse_lazy('declaracion:bienes-muebles-editar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_borrar(self):
return reverse_lazy('declaracion:bienes-muebles-borrar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_todos(self):
return reverse_lazy('declaracion:bienes-muebles',
kwargs={'folio': self.declaraciones.folio})
def tipo_operacion(self):
try:
if self.cat_tipos_operaciones.default:
return u"{} {}".format(self.cat_tipos_operaciones,
self.otra_operacion)
else:
return u"{}".format(self.cat_tipos_operaciones)
except Exception as e:
return u""
def tipo_mueble(self):
try:
if self.cat_tipos_muebles.default:
return u"{} {}".format(self.cat_tipos_muebles,
self.otro_tipo_mueble)
else:
return u"{}".format(self.cat_tipos_muebles)
except Exception as e:
return u""
def titular(self):
try:
if self.cat_tipos_titulares.default:
return u"{} {}".format(self.cat_tipos_titulares,
self.otro_titular)
else:
return u"{}".format(self.cat_tipos_titulares)
except Exception as e:
return u""
def forma_adquisicion(self):
try:
if self.cat_formas_adquisiciones.default:
return u"{} {}".format(self.cat_formas_adquisiciones,
self.otra_forma)
else:
return u"{}".format(self.cat_formas_adquisiciones)
except Exception as e:
return u""
class MueblesNoRegistrables(models.Model):
otra_operacion = models.CharField(max_length=255, blank=True)
otro_bien_mueble = models.CharField(max_length=255, blank=True)
descripcion_bien = models.CharField(max_length=255, blank=True)
otro_titular = models.CharField(max_length=255, blank=True)
otra_forma = models.CharField(max_length=255, blank=True)
fecha_adquisicion = models.DateField(null=True, blank=True)
precio_adquisicion = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
cat_formas_adquisiciones = models.ForeignKey(CatFormasAdquisiciones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_monedas = models.ForeignKey(CatMonedas, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_muebles = models.ForeignKey(CatTiposMuebles, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_operaciones = models.ForeignKey(CatTiposOperaciones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_titulares = models.ForeignKey(CatTiposTitulares, on_delete=models.DO_NOTHING, null=True, blank=True)
declaraciones = models.ForeignKey(Declaraciones, on_delete=models.DO_NOTHING)
observaciones = models.ForeignKey(Observaciones, on_delete=models.DO_NOTHING)
activos_bienes = models.ForeignKey(ActivosBienes, on_delete=models.DO_NOTHING)
def declarante(self):
try:
return [BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.DECLARANTE).first().info_personal_var]
except Exception as e:
return None
def copropietario(self):
try:
return BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.COPROPIETARIO)
except Exception as e:
return None
def propierario_anterior(self):
try:
return BienesPersonas.objects.filter(activos_bienes = self.activos_bienes,cat_tipo_participacion_id=BienesPersonas.PROPIETARIO_ANTERIOR)
except Exception as e:
return None
def observacion(self):
return [self.observaciones]
def columna_uno(self):
if self.cat_tipos_operaciones:
return u"{}".format(self.cat_tipos_operaciones)
else:
return u""
def columna_dos(self):
if self.cat_formas_adquisiciones:
return u"{}".format(self.cat_formas_adquisiciones)
else:
return u""
def columna_tres(self):
if self.cat_tipos_titulares:
return u"{}".format(self.cat_tipos_titulares)
else:
return u""
def url_editar(self):
return reverse_lazy('declaracion:muebles-noregistrables-editar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_borrar(self):
return reverse_lazy('declaracion:muebles-noregistrables-borrar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_todos(self):
return reverse_lazy('declaracion:muebles-noregistrables',
kwargs={'folio': self.declaraciones.folio})
def tipo_operacion(self):
try:
if self.cat_tipos_operaciones.default:
return u"{} {}".format(self.cat_tipos_operaciones,
self.otra_operacion)
else:
return u"{}".format(self.cat_tipos_operaciones)
except Exception as e:
return u""
def tipo_mueble(self):
try:
if self.cat_tipos_muebles.default:
return u"{} {}".format(self.cat_tipos_muebles,
self.otro_bien_mueble)
else:
return u"{}".format(self.cat_tipos_muebles)
except Exception as e:
return u""
def titular(self):
try:
if self.cat_tipos_titulares.default:
return u"{} {}".format(self.cat_tipos_titulares,
self.otro_titular)
else:
return u"{}".format(self.cat_tipos_titulares)
except Exception as e:
return u""
def forma_adquisicion(self):
try:
if self.cat_formas_adquisiciones.default:
return u"{} {}".format(self.cat_formas_adquisiciones,
self.otra_forma)
else:
return u"{}".format(self.cat_formas_adquisiciones)
except Exception as e:
return u""
class Inversiones(models.Model):
otra_operacion = models.CharField(max_length=255, blank=True)
otra_inversion = models.CharField(max_length=255, blank=True)
otro_tipo_especifico = models.CharField(max_length=255, blank=True)
num_cuenta = models.CharField(max_length=255, blank=True)
otra_forma = models.CharField(max_length=255, blank=True)
fecha_inicio = models.DateField(null=True, blank=True)
monto_original = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
tasa_interes = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
saldo_actual = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
plazo = models.DecimalField(max_digits=6, decimal_places=2, null=True, blank=True, default=0)
cat_tipos_titulares = models.ForeignKey(CatTiposTitulares, on_delete=models.DO_NOTHING, null=True, blank=True)
otro_tipo_titular = models.CharField(max_length=255, blank=True)
porcentaje_inversion = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
cat_formas_adquisiciones = models.ForeignKey(CatFormasAdquisiciones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_monedas = models.ForeignKey(CatMonedas, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_paises = models.ForeignKey(CatPaises, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_especificos_inversiones = models.ForeignKey(CatTiposEspecificosInversiones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_inversiones = models.ForeignKey(CatTiposInversiones, on_delete=models.DO_NOTHING, null=True, blank=True)
cat_tipos_operaciones = models.ForeignKey(CatTiposOperaciones, on_delete=models.DO_NOTHING, null=True, blank=True)
declaraciones = models.ForeignKey(Declaraciones, on_delete=models.DO_NOTHING)
observaciones = models.ForeignKey(Observaciones, on_delete=models.DO_NOTHING)
info_personal_var = models.ForeignKey(InfoPersonalVar, on_delete=models.DO_NOTHING)
cat_unidades_temporales = models.ForeignKey(CatUnidadesTemporales, on_delete=models.DO_NOTHING, null=True, blank=True)
def observacion(self):
return [self.observaciones]
def columna_uno(self):
if self.cat_tipos_operaciones:
return u"{}".format(self.cat_tipos_operaciones)
else:
return u""
def columna_dos(self):
if self.cat_formas_adquisiciones:
return u"{}".format(self.cat_formas_adquisiciones)
else:
    return u""
def columna_tres(self):
if self.cat_tipos_titulares:
return u"{}".format(self.cat_tipos_titulares)
else:
return u""
def url_editar(self):
return reverse_lazy('declaracion:inversiones-editar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_borrar(self):
return reverse_lazy('declaracion:inversiones-borrar',
kwargs={'folio': self.declaraciones.folio,
'pk': self.id})
def url_todos(self):
return reverse_lazy('declaracion:inversiones',
kwargs={'folio': self.declaraciones.folio})
def persona(self):
return [self.info_personal_var]
def tipo_operacion(self):
try:
if self.cat_tipos_operaciones.default:
return u"{} {}".format(self.cat_tipos_operaciones,
self.otra_operacion)
else:
return u"{}".format(self.cat_tipos_operaciones)
except Exception as e:
return u""
def titular(self):
try:
if self.cat_tipos_titulares.default:
return u"{} {}".format(self.cat_tipos_titulares,
self.otro_tipo_titular)
else:
return u"{}".format(self.cat_tipos_titulares)
except Exception as e:
return u""
def forma_adquisicion(self):
try:
if self.cat_formas_adquisiciones.default:
return u"{} {}".format(self.cat_formas_adquisiciones,
self.otra_forma)
else:
return u"{}".format(self.cat_formas_adquisiciones)
except Exception as e:
return u""
def tipo_inversion(self):
try:
if self.cat_tipos_inversiones.default:
return u"{} {}".format(self.cat_tipos_inversiones,
self.otra_inversion)
else:
return u"{}".format(self.cat_tipos_inversiones)
except Exception as e:
return u""
def tipo_especifico(self):
try:
if self.cat_tipos_especificos_inversiones.default:
return u"{} {}".format(self.cat_tipos_especificos_inversiones,
self.otro_tipo_especifico)
else:
return u"{}".format(self.cat_tipos_especificos_inversiones)
except Exception as e:
return u""
class EfectivoMetales(models.Model):
otro_tipo_operacion = models.CharField(max_length=255, blank=True)
monto_efectivo = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
otro_metal = | |
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
ASEGGDF2NetCDFConverter concrete class for converting ASEG-GDF data to netCDF
Created on 28Mar.2018
@author: <NAME>
'''
import argparse
from collections import OrderedDict
import numpy as np
import re
import os
import sys
from datetime import datetime
from pprint import pformat
import yaml
import tempfile
import netCDF4
import logging
from geophys_utils.netcdf_converter import ToNetCDFConverter, NetCDFVariable
from geophys_utils import get_spatial_ref_from_wkt
from geophys_utils.netcdf_converter.aseg_gdf_utils import aseg_gdf_format2dtype, fix_field_precision, truncate
from geophys_utils import points2convex_hull
from geophys_utils import transform_coords
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Logging level for this module
TEMP_DIR = tempfile.gettempdir()
#TEMP_DIR = 'D:\Temp'
#TEMP_DIR = 'U:\Alex\Temp'
# Set this to zero for no limit - only set a non-zero value for testing
POINT_LIMIT = 0
# Number of rows per chunk in temporary netCDF cache file
CACHE_CHUNK_ROWS = 8192
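# Illustrative usage (assumption: the inherited ToNetCDFConverter base class in
# geophys_utils provides the conversion entry point, e.g. convert2netcdf();
# that method is not defined in this file):
#   converter = ASEGGDF2NetCDFConverter('/path/to/output.nc',
#                                       '/path/to/survey.dat',
#                                       '/path/to/survey.dfn')
#   converter.convert2netcdf()  # entry point name assumed from the base class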
class ASEGGDF2NetCDFConverter(ToNetCDFConverter):
'''
ASEGGDF2NetCDFConverter concrete class for converting ASEG-GDF data to netCDF
'''
def __init__(self,
nc_out_path,
aem_dat_path,
dfn_path,
crs_string=None,
netcdf_format='NETCDF4',
default_chunk_size=None,
default_variable_parameters=None,
settings_path=None,
fix_precision=True,
space_delimited=False
):
'''
Concrete constructor for subclass ASEGGDF2NetCDFConverter
Needs to initialise object with everything that is required for the other Concrete methods
N.B: Make sure the base class constructor is called from the subclass constructor
@param nc_out_path: Path to output netCDF file on filesystem
@param aem_dat_path: Path to .dat AEM data source file on filesystem
@param dfn_path: Path to .dfn definition file on filesystem
@param crs_string: Optional coordinate reference system (CRS) string
@param netcdf_format: Format for netCDF file. Defaults to 'NETCDF4'
@param default_chunk_size: single default chunk size for all dimensions. None means take default, zero means not chunked.
@param default_variable_parameters: Optional dict containing default parameters for netCDF variable creation
@param settings_path: Optional path for settings YAML file
@param fix_precision: Optional Boolean flag indicating whether to fix (i.e. reduce) field precisions
@param space_delimited: Optional Boolean flag indicating whether the .dat file is space delimited
'''
def get_field_definitions():
'''
Function to read raw field definitions from .dfn file
Will set self.dimensions as an OrderedDict of dimension sizes keyed by dimension name
'''
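# Illustrative (made-up) .dfn field definition record that parse_dfn_file below
# can handle:
#   DEFN 2 ST=RECD,RT=; easting : F10.1 : UNIT=m, NAME=Easting, NULL=-9999.9
# which yields short_name='easting', format='F10.1', units='m',
# long_name='Easting' and fill_value=-9999.9.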
def parse_dfn_file(dfn_path):
logger.info('Reading definitions file {}'.format(dfn_path))
self.field_definitions = []
dfn_file = open(dfn_path, 'r')
for line in dfn_file:
key_value_pairs = {}
positional_value_list = []
for semicolon_split_string in [semicolon_split_string.strip() for semicolon_split_string in line.split(';')]:
for colon_split_string in [colon_split_string.strip() for colon_split_string in semicolon_split_string.split(':')]:
for comma_split_string in [comma_split_string.strip() for comma_split_string in colon_split_string.split(',')]:
definition = [equals_split_string.strip() for equals_split_string in comma_split_string.split('=')]
if len(definition) == 2:
key_value_pairs[definition[0]] = definition[1]
elif len(definition) == 1:
positional_value_list.append(definition[0])
logger.debug('key_value_pairs: {},\npositional_value_list: {}'.format(pformat(key_value_pairs), pformat(positional_value_list)))
# Column definition
if key_value_pairs.get('RT') in ['', 'DATA'] and (positional_value_list
and positional_value_list[0] != 'END DEFN'):
short_name = positional_value_list[0].lower()
fmt = positional_value_list[1] if len(positional_value_list) >= 2 else None
units = key_value_pairs.get('UNITS') or key_value_pairs.get('UNIT')
long_name = key_value_pairs.get('NAME') or (positional_value_list[2] if len(positional_value_list) >= 3 else None)
fill_value = float(key_value_pairs.get('NULL')) if key_value_pairs.get('NULL') is not None else None
# Parse format to determine columns, data type and numeric format
dtype, columns, width_specifier, decimal_places = aseg_gdf_format2dtype(fmt)
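# For reference, a Fortran-style ASEG-GDF format code such as 'F10.3' would be expected to map to
# a single floating point column with width_specifier=10 and decimal_places=3, while something like
# '30E11.4' would describe a 30-column floating point field. (Illustrative only - the exact dtype
# mapping is determined by aseg_gdf_format2dtype.)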
field_definition = {'short_name': short_name,
'format': fmt,
'long_name': long_name,
'dtype': dtype,
'columns': columns,
'width_specifier': width_specifier,
'decimal_places': decimal_places
}
if units:
field_definition['units'] = units
if fill_value is not None:
field_definition['fill_value'] = fill_value
# Set variable attributes in field definition
variable_attribute_dict = {attribute_name: key_value_pairs.get(key.upper())
for key, attribute_name in self.settings['variable_attributes'].items()
if key_value_pairs.get(key.upper()) is not None
}
# Store aseg_gdf_format in variable attributes
variable_attribute_dict['aseg_gdf_format'] = fmt
if variable_attribute_dict:
field_definition['variable_attributes'] = variable_attribute_dict
self.field_definitions.append(field_definition)
# Set CRS from projection name
elif not self.spatial_ref:
if (key_value_pairs.get('RT') == 'PROJ') and (positional_value_list[0] == 'COORDSYS'): # As per ASEG standard
projection_name = key_value_pairs.get('NAME')
if projection_name:
self.spatial_ref = get_spatial_ref_from_wkt(projection_name)
logger.debug('CRS set from .dfn file COORDSYS NAME attribute {}'.format(projection_name))
break # Nothing more to do
elif (key_value_pairs.get('RT') == 'PROJ') and (positional_value_list[0] == 'PROJNAME'): # Non-standard
projection_name = key_value_pairs.get('COMMENT')
if projection_name:
self.spatial_ref = get_spatial_ref_from_wkt(projection_name)
logger.debug('CRS set from .dfn file PROJNAME COMMENT attribute {}'.format(projection_name))
break # Nothing more to do
elif (key_value_pairs.get('RT') == 'PROJ') and (positional_value_list[0] == 'DATUM'): # Unprojected
projection_name = key_value_pairs.get('NAME')
if projection_name:
self.spatial_ref = get_spatial_ref_from_wkt(projection_name)
logger.debug('CRS set from .dfn file DATUM NAME attribute {}'.format(projection_name))
break # Nothing more to do
# Start of get_field_definitions function
parse_dfn_file(dfn_path)
# Read overriding field definition values from settings
if self.settings.get('field_definitions'):
for field_definition in self.field_definitions:
overriding_field_definition = self.settings['field_definitions'].get(field_definition['short_name'])
if overriding_field_definition:
field_definition.update(overriding_field_definition)
logger.debug('self.dimensions: {}'.format(pformat(self.dimensions)))
logger.debug('self.field_definitions: {}'.format(pformat(self.field_definitions)))
# Check for CRS definition in latitude field - need to do this after short_name has potentially been remapped
if not self.spatial_ref:
try:
field_definition = [field_definition for field_definition in self.field_definitions if field_definition['short_name'] == 'latitude'][0]
crs_string = field_definition['variable_attributes']['datum_name']
self.spatial_ref = get_spatial_ref_from_wkt(crs_string)
logger.debug('CRS set from latitude variable datum_name attribute {}'.format(crs_string))
except (IndexError, KeyError):
logger.debug('Unable to set CRS from latitude datum_name attribute')
assert self.spatial_ref, 'Coordinate Reference System undefined'
logger.debug('self.spatial_ref: {}'.format(self.spatial_ref.ExportToPrettyWkt()))
def read_data_file():
'''
Function to read data file into temporary netCDF cache
'''
def create_nc_cache():
'''
Function to create temporary cache file with one 2D variable
Needs to have self.column_count defined to work
'''
self.nc_cache_path = os.path.join(TEMP_DIR, re.sub(r'\W+', '_', os.path.splitext(self.aem_dat_path)[0]) + '.nc')
self._nc_cache_dataset = netCDF4.Dataset(self.nc_cache_path, mode="w", clobber=True, format='NETCDF4')
self._nc_cache_dataset.createDimension(dimname='rows', size=None) # Unlimited size
self.column_count = 0
for field_definition in self.field_definitions:
short_name = field_definition['short_name']
columns = field_definition['columns']
if columns == 1: # 1D variable
field_dimensions =('rows',)
chunksizes=(CACHE_CHUNK_ROWS,)
else: # 2D variable
field_dimension_name = short_name + '_dim' # Default 2nd dimension name
# Look up any dimension name(s) for this variable from settings
override_definition = self.settings['field_definitions'].get(short_name)
# Determine name of dimension
if override_definition:
override_dimensions = override_definition.get('dimensions')
if override_dimensions:
if type(override_dimensions) == list:
field_dimension_name = '_plus_'.join(override_dimensions) # Multiple partial dimensions
elif type(override_dimensions) == str:
# Note that the "_dim" suffix is required to avoid triggering an obscure netCDF4 bug which seems to occur when
# a dimension is defined with the same name as an existing variable.
# See https://github.com/Unidata/netcdf-c/issues/295
# This does not appear to be an issue when the dimension is defined before the variable, but we can't do that for
# the cache dataset, only the final one
field_dimension_name = override_dimensions + '_dim' # Single dimension
# Create dimension if it doesn't already exist
if field_dimension_name not in self._nc_cache_dataset.dimensions.keys():
self._nc_cache_dataset.createDimension(dimname=field_dimension_name, size=columns)
field_dimensions =('rows', field_dimension_name)
chunksizes=(CACHE_CHUNK_ROWS, columns)
logger.debug('\tCreating cache variable {} with dtype {} and dimension(s) {}'.format(short_name, field_definition['dtype'], field_dimensions))
self._nc_cache_dataset.createVariable(varname=short_name,
datatype=field_definition['dtype'],
dimensions=field_dimensions,
chunksizes=chunksizes,
**NetCDFVariable.DEFAULT_VARIABLE_PARAMETERS
)
self.column_count += columns
try: # Do a sync now to force an error if there are dimension/variable naming conflicts
self._nc_cache_dataset.sync()
logger.debug('NetCDF sync completed')
except:
logger.debug('dimensions: {}, variables: {}'.format(self._nc_cache_dataset.dimensions, self._nc_cache_dataset.variables))
raise
logger.debug('Created temporary cache file {}'.format(self.nc_cache_path))
def cache_chunk_list(chunk_list, start_row):
'''
Helper function to write list of lists to cache variables
'''
end_row = start_row + len(chunk_list)
logger.debug('Writing rows {}-{} to disk cache'.format(start_row, end_row))
for field_index in range(len(self.field_definitions)):
field_definition = self.field_definitions[field_index]
short_name = field_definition['short_name']
cache_variable = self._nc_cache_dataset.variables[short_name]
chunk_array = np.array([row_list[field_index] for row_list in chunk_list])
#logger.debug('{} cache_variable: {}'.format(cache_variable.name, cache_variable))
#logger.debug('{}: {}-element {} chunk_array: {}'.format(short_name, chunk_array.shape[0], chunk_array.dtype, chunk_array))
cache_variable[start_row:end_row] = chunk_array
self.total_points += len(chunk_list)
def read_fixed_length_fields(line):
'''
Helper function to read fixed length fields into a list of lists
'''
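# Each value is sliced out of the line by character position: for a field with width_specifier=10
# the first value is line[start_char:start_char+10], and start_char then advances by the (possibly
# adjusted) field width for the next column.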
row_list = []
line_column_count = 0
start_char = 0
for field_definition in self.field_definitions:
short_name = field_definition['short_name']
columns = field_definition['columns']
dtype = field_definition['dtype']
aseg_gdf_format = field_definition['format']
column_list = []
for _column_offset in range(columns):
end_char = start_char + field_definition['width_specifier']
value_string = line[start_char:end_char]
# Work-around for badly formatted files with first entry too short
if not aseg_gdf_format.startswith('A') and ' ' in value_string.strip(): # Not a string field and has a space in the middle
value_string = re.match(r'\s*\S*', value_string).group(0) # Strip anything after non-leading whitespace character
end_char = start_char + len(value_string) # Adjust character offset for next column
value_string = value_string.strip()
try:
if dtype.startswith('int'):
value = int(value_string)
elif dtype.startswith('float'):
value = float(value_string)
else: # Assume
= int(j150)
if j150 not in list(self.spikeShape.keys()):
return
if jthr == j150 and self.verbose:
#print '\n%s:' % self.filename
print('Threshold current T and 1.5T the same: using next up value for j150')
print('jthr, j150, len(spikeShape): ', jthr, j150, len(self.spikeShape))
print('1 ', self.spikeShape[jthr][0]['current']*1e12)
print('2 ', self.spikeShape[j150+1][0]['current']*1e12)
print(' >> Threshold current: %8.3f 1.5T current: %8.3f, next up: %8.3f' % (self.spikeShape[jthr][0]['current']*1e12,
self.spikeShape[j150][0]['current']*1e12, self.spikeShape[j150+1][0]['current']*1e12))
j150 = jthr + 1
spikesfound = False
if len(self.spikeShape[j150]) >= 1 and (0 in list(self.spikeShape[j150].keys())) and self.spikeShape[j150][0]['halfwidth'] is not None:
self.analysis_summary['AP1_Latency'] = (self.spikeShape[j150][0]['AP_Latency'] - self.spikeShape[j150][0]['tstart'])*1e3
self.analysis_summary['AP1_HalfWidth'] = self.spikeShape[j150][0]['halfwidth']*1e3
if self.spikeShape[j150][0]['halfwidth_interpolated'] is not None:
self.analysis_summary['AP1_HalfWidth_interpolated'] = self.spikeShape[j150][0]['halfwidth_interpolated']*1e3
else:
self.analysis_summary['AP1_HalfWidth_interpolated'] = np.nan
spikesfound = True
if len(self.spikeShape[j150]) >= 2 and (1 in list(self.spikeShape[j150].keys())) and self.spikeShape[j150][1]['halfwidth'] is not None:
self.analysis_summary['AP2_Latency'] = (self.spikeShape[j150][1]['AP_Latency'] - self.spikeShape[j150][1]['tstart'])*1e3
self.analysis_summary['AP2_HalfWidth'] = self.spikeShape[j150][1]['halfwidth']*1e3
if self.spikeShape[j150][1]['halfwidth_interpolated'] is not None:
self.analysis_summary['AP2_HalfWidth_interpolated'] = self.spikeShape[j150][1]['halfwidth_interpolated']*1e3
else:
self.analysis_summary['AP2_HalfWidth_interpolated'] = np.nan
if spikesfound:
rate = len(self.spikeShape[j150])/self.spikeShape[j150][0]['pulseDuration'] # spikes per second, normalized for pulse duration
AHPDepth = self.spikeShape[j150][0]['AP_beginV'] - self.spikeShape[j150][0]['trough_V'] # from first spike # first AHP depth
print(f"AHP: Begin = {self.spikeShape[j150][0]['AP_beginV']*1e3:.2f} mV")
print(f" Trough = {self.spikeShape[j150][0]['trough_V']*1e3:.2f} mV")
print(f" Depth = {AHPDepth*1e3:.2f} mV")
self.analysis_summary['FiringRate_1p5T'] = rate
self.analysis_summary['AHP_Depth'] = AHPDepth*1e3 # convert to mV
def fitOne(self, x=None, yd=None, info='', function=None, fixNonMonotonic=True, excludeNonMonotonic=False):
"""Fit the FI plot to an equation that is piecewise linear up to the threshold
called Ibreak, then (1-exp(F/Frate)) for higher currents
Parameters
----------
x : numpy array (no default)
The x data to fit (typically an array of current levels)
yd : numpy array (no default)
The y data to fit (typically an array of spike counts)
If x and yd are None, we extract them from the 'FI_Curve' for this cell.
info : string (default: '')
information to add to a fitted plot
fixNonMonotonic : Boolean (default: True)
If True, only use data up to the maximal firing rate,
discarding the remainder of the steps under the assumption
that the cell is entering depolarization block.
excludeNonMonotonic : Boolean (default: False)
if True, does not even try to fit, and returns None
Returns
-------
None if there is no fitting to be done (excluding non-monotonic or no spikes)
tuple of (fpar, xf, yf, names, error, f, func)
fpar are the best-fit parameters, xf/yf the fitted curve, names the parameter names,
error the fit error, and f/func identify the fitting function that was used
"""
# print('fitone called')
if function is not None:
self.FIGrowth = function
if x is None: # use class data
x = self.analysis_summary['FI_Curve'][0]*1e9
yd = self.analysis_summary['FI_Curve'][1]/self.analysis_summary['pulseDuration'] # convert to rate in spikes/second
if self.FIGrowth == 'fitOneOriginal':
ymax = np.max(yd)
ymax_a = 0.8*ymax
if ymax <= 0.:
return(None)
nonmono = 0
if fixNonMonotonic: # clip at max firing rate
ydiff = np.gradient(yd, x)
xnm = np.where(ydiff < 0.)[0]
if len(xnm) > 0:
imax = xnm[0]+1
else:
imax = len(yd)
# imaxs = [i for i, y in enumerate(yd) if y >= ymax_a] # handle duplicate firing rates
# imax = max(imaxs) # find highest index
dypos = range(0, imax)
x = x[dypos]
yd = yd[dypos]
ymax = np.max(yd)
if np.max(x) < 0.: # skip if max rate is < 0 current
return(None)
ymin = 5.
if ymax < ymin:
ymin = 0.
if ymax > yd[-1] and excludeNonMonotonic:
nonmono += 1
return(None)
# fpnt = np.where(yd > 0) # find first point where cell fires
fire_points = np.where((yd[:-1] > 0) & (yd[1:] > 0))[0] # limit to positive current injections with successive spikes
if len(fire_points) == 0:
return(None)
fbr = fire_points[0]
ibreak0 = x[fbr-1] # use point before first spike as the initial break point
dx = np.abs(np.mean(np.diff(x))) # get current steps
xp = x[fire_points]
xp = xp - ibreak0 - dx
yp = yd[fire_points] # save data with responses
testMethod = "simplex" # 'SLSQP' # L-BFGS-B simplex, SLSQP, 'TNC', 'COBYLA'
if fbr-2 >= 0:
x0 = fbr-2
else:
x0 = 0
if fbr < len(x):
x1 = fbr
else:
x1 = len(x)-1
res = []
err = []
fitter = Fitting.Fitting() # make sure we always work with the same instance
for i in range(-4, 4): # allow breakpoint to move
if fbr + i + 1 > len(x)-1:
continue
x0 = fbr+i
for j in range(0,4): # then vary the width of the linear region
x1 = x0 + j
if x1 >= len(x):
continue
bounds = ((0., 0.), np.sort([x[x0], x[x1]]),
(0., 2.*yp[0]), (0., ymax*10.0), (1e-5, 1e5))
# parameters for FIGrowth 1: ['Fzero', 'Ibreak', 'F1amp', 'F2amp', 'Irate']
# if i == -4 and j == 0:
fitbreak0 = ibreak0
initpars = [0., np.min(bounds[1]),
0., np.mean(bounds[3]), np.mean(bounds[4])]
func = 'FIGrowthExpBreak'
f = fitter.fitfuncmap[func]
(fpar, xf, yf, names) = fitter.FitRegion(np.array([1]), 0, x, yd, t0=fitbreak0, t1=np.max(x),
fitFunc=func, fitPars=initpars, bounds=bounds,
fixedPars=None, method=testMethod)
error = fitter.getFitErr()
res.append({'fpar': fpar, 'xf': xf, 'yf': yf, 'names': names, 'error': error})
err.append(error)
minerr = np.argmin(err)
fpar = res[minerr]['fpar']
xf = res[minerr]['xf']
yf = res[minerr]['yf']
names = res[minerr]['names']
error = res[minerr]['error']
else: # recompute some stuff
# estimate initial parameters and set region of IV curve to be used for fitting
ymax = np.max(yd) # maximum spike rate (not count; see above)
if ymax == 0:
return None
ymax_nm = 0.8*np.max(yd) # maximum spike rate (not count; see above)
nonmono = 0 # counter for non-monotonic FI curves; initialised here so the excludeNonMonotonic branch below cannot raise UnboundLocalError
dypos = range(len(x))
if fixNonMonotonic and ymax_nm > yd[-1]: # fix non-monotonic firing - clip fitting to the current that generates the max firing rate
imaxs = [i for i, y in enumerate(yd) if y >= ymax_nm] # handle duplicate firing rates
imax = max(imaxs) # find highest index
dypos = list(range(0, imax+1))
x = x[dypos] # restrict current and response range to those currents
yd = yd[dypos]
ymax = np.max(yd)
if np.max(x) < 0.: # skip if max rate occurs at negative current level
return None
ymin = 5
if ymax < ymin:
ymin = 0.
if ymax > yd[-1] and excludeNonMonotonic:
nonmono += 1
return None
# Now find first point where cell fires and next step also has cell firing
fire_points = np.where((yd[:-1] > 0) & (yd[1:] > 0))[0] # limit to positive current injections with successive spikes
fbr = fire_points[0]
testMethod = 'SLSQP' # 'SLSQP' # L-BFGS-B simplex, SLSQP, 'TNC', 'COBYLA'
if fbr - 1 >= 0: # set start and end of linear fit
x0 = fbr - 1 # x0 is last point (in current) with no spikes
else:
x0 = 0
if fbr < len(x): # x1 is the next point, which has a spike
x1 = fbr
else:
x1 = len(x) - 1
ibreak0 = x[x0] # use point before first spike as the initial break point
if self.FIGrowth == 'FIGrowthExpBreak':
# print('Exponential model fit')
ixb = fbr # np.argwhere(yd > 0)[0][0]
cons = ( {'type': 'eq', 'fun': lambda xc: xc[0]}, # lock F0 at >= 0
{'type': 'ineq', 'fun': lambda xc: xc[1] - x[ixb-1]}, # ibreak between last no spike and first spiking level
{'type': 'ineq', 'fun': lambda xc: x[ixb] - xc[1]}, # ibreak between last no spike and first spiking level
{'type': 'eq', 'fun': lambda xc: xc[2]}, # F1amp >= 0
{'type': 'ineq', 'fun': lambda xc: xc[3] - xc[2]}, # F2amp > F1amp (must be!)
{'type': 'ineq', 'fun': lambda xc: xc[4]},
)
bounds = ((0., yd[fbr-1]+5), np.sort([x[x0], x[x1]]),
(0., 2*yd[fbr]), (0., ymax*10.0), (0, 1e5))
# # parameters for FIGrowth 1: ['Fzero', 'Ibreak', 'F1amp', 'F2amp', 'Irate']
initpars = [0., ibreak0, yd[fbr], ymax*2, 0.01*np.max(np.diff(yd)/np.diff(x))]
func = 'FIGrowthExpBreak'
fitbreak0 = x[fbr]
f = Fitting.Fitting().fitfuncmap[func]
# now fit the full data set
(fpar, xf, yf, names) = Fitting.Fitting().FitRegion(np.array([1]), 0, x, yd, t0=fitbreak0, t1=x[dypos[-1]],
fitFunc=func, fitPars=initpars, bounds=bounds, constraints=cons, weights=None, #np.sqrt,
fixedPars=None, method=testMethod)
error = Fitting.Fitting().getFitErr()
self.FIKeys = f[6]
elif self.FIGrowth == 'FIGrowthExp': # FIGrowth is 2, Exponential from 0 rate
bounds = (np.sort([x[x0], x[x1]]),
(0., ymax*5.0), (0.0001, 1000.))
# # parameters for FIGrowth 2: [''Ibreak', 'F2amp', 'Irate']
fitbreak0 = ibreak0
if fitbreak0 > 0.:
fitbreak0 = 0.
initpars = [ibreak0, ymax/2., 0.001]
func = 'FIGrowthExp'
f | |
import mindspore.ops as P
from mindspore import nn
class Module0(nn.Cell):
def __init__(self, batchnorm2d_1_num_features, conv2d_3_in_channels):
super(Module0, self).__init__()
self.concat_0 = P.Concat(axis=1)
self.batchnorm2d_1 = nn.BatchNorm2d(num_features=batchnorm2d_1_num_features,
eps=9.999999747378752e-06,
momentum=0.8999999761581421)
self.relu_2 = nn.ReLU()
self.conv2d_3 = nn.Conv2d(in_channels=conv2d_3_in_channels,
out_channels=128,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.relu_4 = nn.ReLU()
self.conv2d_5 = nn.Conv2d(in_channels=128,
out_channels=32,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1, 1, 1),
pad_mode="pad",
dilation=(1, 1),
group=1,
has_bias=False)
def construct(self, x):
opt_concat_0 = self.concat_0((x,))
opt_batchnorm2d_1 = self.batchnorm2d_1(opt_concat_0)
opt_relu_2 = self.relu_2(opt_batchnorm2d_1)
opt_conv2d_3 = self.conv2d_3(opt_relu_2)
opt_relu_4 = self.relu_4(opt_conv2d_3)
opt_conv2d_5 = self.conv2d_5(opt_relu_4)
return opt_conv2d_5
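# Module0 is the first DenseNet-style layer of a block: BatchNorm -> ReLU -> 1x1 conv (128
# channels) -> ReLU -> 3x3 conv (32 channels). Each call therefore produces a 32-channel feature
# map that is concatenated with its inputs downstream (see MainModel.construct below).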
class Module1(nn.Cell):
def __init__(self, batchnorm2d_0_num_features):
super(Module1, self).__init__()
self.batchnorm2d_0 = nn.BatchNorm2d(num_features=batchnorm2d_0_num_features,
eps=9.999999747378752e-06,
momentum=0.8999999761581421)
self.relu_1 = nn.ReLU()
def construct(self, x):
opt_batchnorm2d_0 = self.batchnorm2d_0(x)
opt_relu_1 = self.relu_1(opt_batchnorm2d_0)
return opt_relu_1
class Module4(nn.Cell):
def __init__(self, conv2d_0_in_channels, module1_0_batchnorm2d_0_num_features):
super(Module4, self).__init__()
self.module1_0 = Module1(batchnorm2d_0_num_features=module1_0_batchnorm2d_0_num_features)
self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
out_channels=128,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=True)
self.relu_1 = nn.ReLU()
self.conv2d_2 = nn.Conv2d(in_channels=128,
out_channels=32,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1, 1, 1),
pad_mode="pad",
dilation=(1, 1),
group=1,
has_bias=False)
def construct(self, x):
module1_0_opt = self.module1_0(x)
opt_conv2d_0 = self.conv2d_0(module1_0_opt)
opt_relu_1 = self.relu_1(opt_conv2d_0)
opt_conv2d_2 = self.conv2d_2(opt_relu_1)
return opt_conv2d_2
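# Module4 is the recurring dense-layer unit: BN -> ReLU (Module1) -> 1x1 bottleneck conv
# (128 channels) -> ReLU -> 3x3 conv (32 channels). Its in_channels grows by 32 per layer,
# which is why MainModel instantiates it with 96, 128, 160, ... input channels.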
class MainModel(nn.Cell):
def __init__(self):
super(MainModel, self).__init__()
self.conv2d_0 = nn.Conv2d(in_channels=3,
out_channels=64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3, 3, 3),
pad_mode="pad",
dilation=(1, 1),
group=1,
has_bias=True)
self.relu_1 = nn.ReLU()
self.pad_maxpool2d_2 = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)))
self.maxpool2d_2 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
self.module0_0 = Module0(batchnorm2d_1_num_features=64, conv2d_3_in_channels=64)
self.concat_9 = P.Concat(axis=1)
self.module4_0 = Module4(conv2d_0_in_channels=96, module1_0_batchnorm2d_0_num_features=96)
self.concat_15 = P.Concat(axis=1)
self.module4_1 = Module4(conv2d_0_in_channels=128, module1_0_batchnorm2d_0_num_features=128)
self.concat_21 = P.Concat(axis=1)
self.module4_2 = Module4(conv2d_0_in_channels=160, module1_0_batchnorm2d_0_num_features=160)
self.concat_27 = P.Concat(axis=1)
self.module4_3 = Module4(conv2d_0_in_channels=192, module1_0_batchnorm2d_0_num_features=192)
self.concat_33 = P.Concat(axis=1)
self.module4_4 = Module4(conv2d_0_in_channels=224, module1_0_batchnorm2d_0_num_features=224)
self.concat_39 = P.Concat(axis=1)
self.module1_0 = Module1(batchnorm2d_0_num_features=256)
self.conv2d_42 = nn.Conv2d(in_channels=256,
out_channels=128,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=False)
self.pad_avgpool2d_43 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
self.avgpool2d_43 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
self.module0_1 = Module0(batchnorm2d_1_num_features=128, conv2d_3_in_channels=128)
self.concat_50 = P.Concat(axis=1)
self.module4_5 = Module4(conv2d_0_in_channels=160, module1_0_batchnorm2d_0_num_features=160)
self.concat_56 = P.Concat(axis=1)
self.module4_6 = Module4(conv2d_0_in_channels=192, module1_0_batchnorm2d_0_num_features=192)
self.concat_62 = P.Concat(axis=1)
self.module4_7 = Module4(conv2d_0_in_channels=224, module1_0_batchnorm2d_0_num_features=224)
self.concat_68 = P.Concat(axis=1)
self.module4_8 = Module4(conv2d_0_in_channels=256, module1_0_batchnorm2d_0_num_features=256)
self.concat_74 = P.Concat(axis=1)
self.module4_9 = Module4(conv2d_0_in_channels=288, module1_0_batchnorm2d_0_num_features=288)
self.concat_80 = P.Concat(axis=1)
self.module4_10 = Module4(conv2d_0_in_channels=320, module1_0_batchnorm2d_0_num_features=320)
self.concat_86 = P.Concat(axis=1)
self.module4_11 = Module4(conv2d_0_in_channels=352, module1_0_batchnorm2d_0_num_features=352)
self.concat_92 = P.Concat(axis=1)
self.module4_12 = Module4(conv2d_0_in_channels=384, module1_0_batchnorm2d_0_num_features=384)
self.concat_98 = P.Concat(axis=1)
self.module4_13 = Module4(conv2d_0_in_channels=416, module1_0_batchnorm2d_0_num_features=416)
self.concat_104 = P.Concat(axis=1)
self.module4_14 = Module4(conv2d_0_in_channels=448, module1_0_batchnorm2d_0_num_features=448)
self.concat_110 = P.Concat(axis=1)
self.module4_15 = Module4(conv2d_0_in_channels=480, module1_0_batchnorm2d_0_num_features=480)
self.concat_116 = P.Concat(axis=1)
self.module1_1 = Module1(batchnorm2d_0_num_features=512)
self.conv2d_119 = nn.Conv2d(in_channels=512,
out_channels=256,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=False)
self.pad_avgpool2d_120 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
self.avgpool2d_120 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
self.module0_2 = Module0(batchnorm2d_1_num_features=256, conv2d_3_in_channels=256)
self.concat_127 = P.Concat(axis=1)
self.module4_16 = Module4(conv2d_0_in_channels=288, module1_0_batchnorm2d_0_num_features=288)
self.concat_133 = P.Concat(axis=1)
self.module4_17 = Module4(conv2d_0_in_channels=320, module1_0_batchnorm2d_0_num_features=320)
self.concat_139 = P.Concat(axis=1)
self.module4_18 = Module4(conv2d_0_in_channels=352, module1_0_batchnorm2d_0_num_features=352)
self.concat_145 = P.Concat(axis=1)
self.module4_19 = Module4(conv2d_0_in_channels=384, module1_0_batchnorm2d_0_num_features=384)
self.concat_151 = P.Concat(axis=1)
self.module4_20 = Module4(conv2d_0_in_channels=416, module1_0_batchnorm2d_0_num_features=416)
self.concat_157 = P.Concat(axis=1)
self.module4_21 = Module4(conv2d_0_in_channels=448, module1_0_batchnorm2d_0_num_features=448)
self.concat_163 = P.Concat(axis=1)
self.module4_22 = Module4(conv2d_0_in_channels=480, module1_0_batchnorm2d_0_num_features=480)
self.concat_169 = P.Concat(axis=1)
self.module4_23 = Module4(conv2d_0_in_channels=512, module1_0_batchnorm2d_0_num_features=512)
self.concat_175 = P.Concat(axis=1)
self.module4_24 = Module4(conv2d_0_in_channels=544, module1_0_batchnorm2d_0_num_features=544)
self.concat_181 = P.Concat(axis=1)
self.module4_25 = Module4(conv2d_0_in_channels=576, module1_0_batchnorm2d_0_num_features=576)
self.concat_187 = P.Concat(axis=1)
self.module4_26 = Module4(conv2d_0_in_channels=608, module1_0_batchnorm2d_0_num_features=608)
self.concat_193 = P.Concat(axis=1)
self.module4_27 = Module4(conv2d_0_in_channels=640, module1_0_batchnorm2d_0_num_features=640)
self.concat_199 = P.Concat(axis=1)
self.module4_28 = Module4(conv2d_0_in_channels=672, module1_0_batchnorm2d_0_num_features=672)
self.concat_205 = P.Concat(axis=1)
self.module4_29 = Module4(conv2d_0_in_channels=704, module1_0_batchnorm2d_0_num_features=704)
self.concat_211 = P.Concat(axis=1)
self.module4_30 = Module4(conv2d_0_in_channels=736, module1_0_batchnorm2d_0_num_features=736)
self.concat_217 = P.Concat(axis=1)
self.module4_31 = Module4(conv2d_0_in_channels=768, module1_0_batchnorm2d_0_num_features=768)
self.concat_223 = P.Concat(axis=1)
self.module4_32 = Module4(conv2d_0_in_channels=800, module1_0_batchnorm2d_0_num_features=800)
self.concat_229 = P.Concat(axis=1)
self.module4_33 = Module4(conv2d_0_in_channels=832, module1_0_batchnorm2d_0_num_features=832)
self.concat_235 = P.Concat(axis=1)
self.module4_34 = Module4(conv2d_0_in_channels=864, module1_0_batchnorm2d_0_num_features=864)
self.concat_241 = P.Concat(axis=1)
self.module4_35 = Module4(conv2d_0_in_channels=896, module1_0_batchnorm2d_0_num_features=896)
self.concat_247 = P.Concat(axis=1)
self.module4_36 = Module4(conv2d_0_in_channels=928, module1_0_batchnorm2d_0_num_features=928)
self.concat_253 = P.Concat(axis=1)
self.module4_37 = Module4(conv2d_0_in_channels=960, module1_0_batchnorm2d_0_num_features=960)
self.concat_259 = P.Concat(axis=1)
self.module4_38 = Module4(conv2d_0_in_channels=992, module1_0_batchnorm2d_0_num_features=992)
self.concat_265 = P.Concat(axis=1)
self.module1_2 = Module1(batchnorm2d_0_num_features=1024)
self.conv2d_268 = nn.Conv2d(in_channels=1024,
out_channels=512,
kernel_size=(1, 1),
stride=(1, 1),
padding=0,
pad_mode="valid",
dilation=(1, 1),
group=1,
has_bias=False)
self.pad_avgpool2d_269 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
self.avgpool2d_269 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
self.module0_3 = Module0(batchnorm2d_1_num_features=512, conv2d_3_in_channels=512)
self.concat_276 = P.Concat(axis=1)
self.module4_39 = Module4(conv2d_0_in_channels=544, module1_0_batchnorm2d_0_num_features=544)
self.concat_282 = P.Concat(axis=1)
self.module4_40 = Module4(conv2d_0_in_channels=576, module1_0_batchnorm2d_0_num_features=576)
self.concat_288 = P.Concat(axis=1)
self.module4_41 = Module4(conv2d_0_in_channels=608, module1_0_batchnorm2d_0_num_features=608)
self.concat_294 = P.Concat(axis=1)
self.module4_42 = Module4(conv2d_0_in_channels=640, module1_0_batchnorm2d_0_num_features=640)
self.concat_300 = P.Concat(axis=1)
self.module4_43 = Module4(conv2d_0_in_channels=672, module1_0_batchnorm2d_0_num_features=672)
self.concat_306 = P.Concat(axis=1)
self.module4_44 = Module4(conv2d_0_in_channels=704, module1_0_batchnorm2d_0_num_features=704)
self.concat_312 = P.Concat(axis=1)
self.module4_45 = Module4(conv2d_0_in_channels=736, module1_0_batchnorm2d_0_num_features=736)
self.concat_318 = P.Concat(axis=1)
self.module4_46 = Module4(conv2d_0_in_channels=768, module1_0_batchnorm2d_0_num_features=768)
self.concat_324 = P.Concat(axis=1)
self.module4_47 = Module4(conv2d_0_in_channels=800, module1_0_batchnorm2d_0_num_features=800)
self.concat_330 = P.Concat(axis=1)
self.module4_48 = Module4(conv2d_0_in_channels=832, module1_0_batchnorm2d_0_num_features=832)
self.concat_336 = P.Concat(axis=1)
self.module4_49 = Module4(conv2d_0_in_channels=864, module1_0_batchnorm2d_0_num_features=864)
self.concat_342 = P.Concat(axis=1)
self.module4_50 = Module4(conv2d_0_in_channels=896, module1_0_batchnorm2d_0_num_features=896)
self.concat_348 = P.Concat(axis=1)
self.module4_51 = Module4(conv2d_0_in_channels=928, module1_0_batchnorm2d_0_num_features=928)
self.concat_354 = P.Concat(axis=1)
self.module4_52 = Module4(conv2d_0_in_channels=960, module1_0_batchnorm2d_0_num_features=960)
self.concat_360 = P.Concat(axis=1)
self.module4_53 = Module4(conv2d_0_in_channels=992, module1_0_batchnorm2d_0_num_features=992)
self.concat_366 = P.Concat(axis=1)
self.module1_3 = Module1(batchnorm2d_0_num_features=1024)
self.avgpool2d_369 = nn.AvgPool2d(kernel_size=(7, 7))
self.flatten_370 = nn.Flatten()
self.dense_371 = nn.Dense(in_channels=1024, out_channels=1000, has_bias=True)
def construct(self, input_1):
opt_conv2d_0 = self.conv2d_0(input_1)
opt_relu_1 = self.relu_1(opt_conv2d_0)
opt_maxpool2d_2 = self.pad_maxpool2d_2(opt_relu_1)
opt_maxpool2d_2 = self.maxpool2d_2(opt_maxpool2d_2)
module0_0_opt = self.module0_0(opt_maxpool2d_2)
opt_concat_9 = self.concat_9((opt_maxpool2d_2, module0_0_opt,))
module4_0_opt = self.module4_0(opt_concat_9)
opt_concat_15 = self.concat_15((opt_maxpool2d_2, module0_0_opt, module4_0_opt,))
module4_1_opt = self.module4_1(opt_concat_15)
opt_concat_21 = self.concat_21((opt_maxpool2d_2, module0_0_opt, module4_0_opt, module4_1_opt,))
module4_2_opt = self.module4_2(opt_concat_21)
opt_concat_27 = self.concat_27((opt_maxpool2d_2, module0_0_opt, module4_0_opt, module4_1_opt, module4_2_opt,))
module4_3_opt = self.module4_3(opt_concat_27)
opt_concat_33 = self.concat_33(
(opt_maxpool2d_2, module0_0_opt, module4_0_opt, module4_1_opt, module4_2_opt, module4_3_opt,
))
module4_4_opt = self.module4_4(opt_concat_33)
opt_concat_39 = self.concat_39(
(opt_maxpool2d_2, module0_0_opt, module4_0_opt, module4_1_opt, module4_2_opt, module4_3_opt, module4_4_opt,
))
module1_0_opt = self.module1_0(opt_concat_39)
opt_conv2d_42 = self.conv2d_42(module1_0_opt)
opt_avgpool2d_43 = self.pad_avgpool2d_43(opt_conv2d_42)
opt_avgpool2d_43 = self.avgpool2d_43(opt_avgpool2d_43)
module0_1_opt = self.module0_1(opt_avgpool2d_43)
opt_concat_50 = self.concat_50((opt_avgpool2d_43, module0_1_opt,))
module4_5_opt = self.module4_5(opt_concat_50)
opt_concat_56 = self.concat_56((opt_avgpool2d_43, module0_1_opt, module4_5_opt,))
module4_6_opt = self.module4_6(opt_concat_56)
opt_concat_62 = self.concat_62((opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt,))
module4_7_opt = self.module4_7(opt_concat_62)
opt_concat_68 = self.concat_68((opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt,))
module4_8_opt = self.module4_8(opt_concat_68)
opt_concat_74 = self.concat_74(
(opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt, module4_8_opt,
))
module4_9_opt = self.module4_9(opt_concat_74)
opt_concat_80 = self.concat_80(
(opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt, module4_8_opt, module4_9_opt,
))
module4_10_opt = self.module4_10(opt_concat_80)
opt_concat_86 = self.concat_86((opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt,
module4_8_opt, module4_9_opt, module4_10_opt,
))
module4_11_opt = self.module4_11(opt_concat_86)
opt_concat_92 = self.concat_92((opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt,
module4_8_opt, module4_9_opt, module4_10_opt, module4_11_opt,
))
module4_12_opt = self.module4_12(opt_concat_92)
opt_concat_98 = self.concat_98((opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt,
module4_8_opt, module4_9_opt, module4_10_opt, module4_11_opt, module4_12_opt,
))
module4_13_opt = self.module4_13(opt_concat_98)
opt_concat_104 = self.concat_104(
(opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt, module4_8_opt, module4_9_opt,
module4_10_opt, module4_11_opt, module4_12_opt, module4_13_opt,
))
module4_14_opt = self.module4_14(opt_concat_104)
opt_concat_110 = self.concat_110(
(opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt, module4_8_opt, module4_9_opt,
module4_10_opt, module4_11_opt, module4_12_opt, module4_13_opt, module4_14_opt,
))
module4_15_opt = self.module4_15(opt_concat_110)
opt_concat_116 = self.concat_116(
(opt_avgpool2d_43, module0_1_opt, module4_5_opt, module4_6_opt, module4_7_opt, module4_8_opt, module4_9_opt,
module4_10_opt, module4_11_opt, module4_12_opt, module4_13_opt, module4_14_opt, module4_15_opt,
))
module1_1_opt = self.module1_1(opt_concat_116)
opt_conv2d_119 = self.conv2d_119(module1_1_opt)
opt_avgpool2d_120 = self.pad_avgpool2d_120(opt_conv2d_119)
opt_avgpool2d_120 = self.avgpool2d_120(opt_avgpool2d_120)
module0_2_opt = self.module0_2(opt_avgpool2d_120)
opt_concat_127 = self.concat_127((opt_avgpool2d_120, module0_2_opt,))
module4_16_opt = self.module4_16(opt_concat_127)
opt_concat_133 = self.concat_133((opt_avgpool2d_120, module0_2_opt, module4_16_opt,))
module4_17_opt = self.module4_17(opt_concat_133)
opt_concat_139 = self.concat_139((opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt,))
module4_18_opt = self.module4_18(opt_concat_139)
opt_concat_145 = self.concat_145(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt,
))
module4_19_opt = self.module4_19(opt_concat_145)
opt_concat_151 = self.concat_151(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
))
module4_20_opt = self.module4_20(opt_concat_151)
opt_concat_157 = self.concat_157((opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt,
module4_18_opt, module4_19_opt, module4_20_opt,
))
module4_21_opt = self.module4_21(opt_concat_157)
opt_concat_163 = self.concat_163((opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt,
module4_18_opt, module4_19_opt, module4_20_opt, module4_21_opt,
))
module4_22_opt = self.module4_22(opt_concat_163)
opt_concat_169 = self.concat_169(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt,
))
module4_23_opt = self.module4_23(opt_concat_169)
opt_concat_175 = self.concat_175(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt,
))
module4_24_opt = self.module4_24(opt_concat_175)
opt_concat_181 = self.concat_181(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt,
))
module4_25_opt = self.module4_25(opt_concat_181)
opt_concat_187 = self.concat_187(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt, module4_25_opt,
))
module4_26_opt = self.module4_26(opt_concat_187)
opt_concat_193 = self.concat_193(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt, module4_25_opt,
module4_26_opt,
))
module4_27_opt = self.module4_27(opt_concat_193)
opt_concat_199 = self.concat_199(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt, module4_25_opt,
module4_26_opt, module4_27_opt,
))
module4_28_opt = self.module4_28(opt_concat_199)
opt_concat_205 = self.concat_205(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt, module4_25_opt,
module4_26_opt, module4_27_opt, module4_28_opt,
))
module4_29_opt = self.module4_29(opt_concat_205)
opt_concat_211 = self.concat_211(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt, module4_25_opt,
module4_26_opt, module4_27_opt, module4_28_opt, module4_29_opt,
))
module4_30_opt = self.module4_30(opt_concat_211)
opt_concat_217 = self.concat_217(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt, module4_25_opt,
module4_26_opt, module4_27_opt, module4_28_opt, module4_29_opt, module4_30_opt,
))
module4_31_opt = self.module4_31(opt_concat_217)
opt_concat_223 = self.concat_223(
(opt_avgpool2d_120, module0_2_opt, module4_16_opt, module4_17_opt, module4_18_opt, module4_19_opt,
module4_20_opt, module4_21_opt, module4_22_opt, module4_23_opt, module4_24_opt, module4_25_opt,
module4_26_opt, module4_27_opt, module4_28_opt, module4_29_opt, module4_30_opt, module4_31_opt,
))
module4_32_opt = self.module4_32(opt_concat_223)
opt_concat_229 = self.concat_229(
(opt_avgpool2d_120,
# Source repository: insikk/quora_question_duplicate
import random
import itertools
import numpy as np
import tensorflow as tf
from read_data import DataSet
from tensorflow.contrib.rnn import BasicLSTMCell, LSTMCell, LSTMStateTuple
from tensorflow.contrib import seq2seq
from tensorflow.python.ops.rnn import dynamic_rnn
from mytensorflow import get_initializer
from rnn import get_last_relevant_rnn_output, get_sequence_length
from nn import multi_conv1d, highway_network
from rnn_cell import SwitchableDropoutWrapper
def get_multi_gpu_models(config):
models = []
for gpu_idx in range(config.num_gpus):
with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/{}:{}".format(config.device_type, gpu_idx)):
if gpu_idx > 0:
tf.get_variable_scope().reuse_variables()
model = Model(config, scope, rep=gpu_idx == 0)
models.append(model)
return models
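# Typical use, as a sketch (session setup and the training loop are assumed to live elsewhere
# in the repo; sess is an ordinary tf.Session here):
#
#     models = get_multi_gpu_models(config)
#     model = models[0]  # the representative model (rep=True) owns the summaries
#     feed_dict = model.get_feed_dict(batch, True)
#     loss = sess.run(model.get_loss(), feed_dict=feed_dict)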
class Model(object):
def __init__(self, config, scope, rep=True):
self.scope = scope
self.config = config
self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
initializer=tf.constant_initializer(0), trainable=False)
# Define forward inputs here
N, JX, VW, VC, W = \
config.batch_size, config.max_sent_size, \
config.word_vocab_size, config.char_vocab_size, config.max_word_size
self.x = tf.placeholder('int32', [N, None], name='x')
self.cx = tf.placeholder('int32', [N, None, W], name='cx')
self.x_mask = tf.placeholder('bool', [N, None], name='x_mask')
self.x_length = tf.placeholder('int32', [N], name='x_length')
self.y = tf.placeholder('int32', [N, None], name='y')
self.cy = tf.placeholder('int32', [N, None, W], name='cy')
self.y_mask = tf.placeholder('bool', [N, None], name='y_mask')
self.y_length = tf.placeholder('int32', [N], name='y_length')
self.z = tf.placeholder('float32', [N, 3], name='z')
self.is_train = tf.placeholder('bool', [], name='is_train')
self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')
# Define misc
self.tensor_dict = {}
self.h_dim = config.hidden_size
# Forward outputs / loss inputs
self.logits = None
self.yp = None
self.var_list = None
self.na_prob = None
# Loss outputs
self.loss = None
self._build_forward()
self._build_loss()
self.var_ema = None
if rep:
self._build_var_ema()
if config.mode == 'train':
self._build_ema()
self.summary = tf.summary.merge_all()
self.summary = tf.summary.merge(tf.get_collection("summaries", scope=self.scope))
def _encoder(self, input_seq, input_seq_length, name="", reuse=False):
with tf.variable_scope("Encoder") as scope:
if reuse:
tf.get_variable_scope().reuse_variables()
cell_fw = BasicLSTMCell(self.h_dim, state_is_tuple=True, reuse=reuse)
cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob = self.config.input_keep_prob)
cell_bw = BasicLSTMCell(self.h_dim, state_is_tuple=True, reuse=reuse)
cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob = self.config.input_keep_prob)
(encoder_outputs, encoder_state) = tf.nn.bidirectional_dynamic_rnn(cell_fw,
cell_bw,
inputs=input_seq,
sequence_length=input_seq_length,
dtype=tf.float32,
scope='enc')
# Join outputs since we are using a bidirectional RNN
encoder_outputs = tf.concat(encoder_outputs, 2)
if isinstance(encoder_state[0], LSTMStateTuple):
encoder_state_c = tf.concat(
(encoder_state[0].c, encoder_state[1].c), 1, name='bidirectional_concat_c')
encoder_state_h = tf.concat(
(encoder_state[0].h, encoder_state[1].h), 1, name='bidirectional_concat_h')
encoder_state = LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
return encoder_outputs, encoder_state
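# _encoder returns (outputs, state): outputs is the concatenation of the forward and backward
# LSTM outputs with shape [N, J, 2 * h_dim]; state is an LSTMStateTuple whose c and h are the
# concatenated final states of the two directions, each of shape [N, 2 * h_dim].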
def _build_forward(self):
config = self.config
N, JX, VW, VC, d, W = \
config.batch_size, config.max_sent_size, \
config.word_vocab_size, config.char_vocab_size, \
config.hidden_size, config.max_word_size
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
# Getting word vector
with tf.variable_scope("emb"):
if config.use_char_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, JX, W, dc]
Acy = tf.nn.embedding_lookup(char_emb_mat, self.cy) # [N, JX, W, dc]
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
yy = multi_conv1d(Acy, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
yy = multi_conv1d(Acy, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="yy")
xx = tf.reshape(xx, [-1, JX, dco])
yy = tf.reshape(yy, [-1, JX, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, JX, d]
Ay = tf.nn.embedding_lookup(word_emb_mat, self.y) # [N, JX, d]
self.tensor_dict['x'] = Ax
self.tensor_dict['y'] = Ay
if config.use_char_emb:
xx = tf.concat(axis=2, values=[xx, Ax]) # [N, M, JX, di]
yy = tf.concat(axis=2, values=[yy, Ay]) # [N, JQ, di]
else:
xx = Ax
yy = Ay
# highway network
if config.highway:
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
yy = highway_network(yy, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['yy'] = yy
self.x_output, self.x_state = self._encoder(xx, self.x_length)
self.y_output, self.y_state = self._encoder(yy, self.y_length, reuse=True) # use the same sentence encoder.
length = get_sequence_length(self.x_output)
self.X = get_last_relevant_rnn_output(self.x_output, length)
length = get_sequence_length(self.y_output)
self.Y = get_last_relevant_rnn_output(self.y_output, length)
self.h0 = tf.concat((self.X, self.Y), 1)
self.W1 = tf.get_variable("W1", shape=[self.h_dim * 4, 200])
self.b1 = tf.get_variable("b1", shape=[200])
self.a1 = tf.nn.relu(tf.add(tf.matmul(self.h0, self.W1), self.b1))
self.W2 = tf.get_variable("W2", shape=[200, 200])
self.b2 = tf.get_variable("b2", shape=[200])
self.a2 = tf.nn.relu(tf.add(tf.matmul(self.a1, self.W2), self.b2))
self.W3 = tf.get_variable("W3", shape=[200, 200])
self.b3 = tf.get_variable("b3", shape=[200])
self.a3 = tf.nn.relu(tf.add(tf.matmul(self.a2, self.W3), self.b3))
self.W_pred = tf.get_variable("W_pred", shape=[200, 3])
self.logits = tf.matmul(self.a3, self.W_pred)
print("logits:", self.logits)
def _enc_dec(input_sequence):
"""
return: (sent_repr, loss)
sent_repr: hidden layer output of sentence encoder
loss: reconstruction loss. You can use this loss for multi-task learning, or you could just use the encoder part on its own.
"""
# create. auto encoder style. target_ouptut
self.encoder_inputs_embedded = xx # x1, x2, ..., x_n
self.decoder_train_inputs_embedded = yy # <GO>, x1, x2, ..., x_(n-1)
self.decoder_train_length = self.y_length
self.decoder_train_targets = self.x
# Sentence Encoder.
with tf.variable_scope("Encoder") as scope:
# Using biLSTM
cell_fw = BasicLSTMCell(self.h_dim, state_is_tuple=True)
cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob = config.input_keep_prob)
cell_bw = BasicLSTMCell(self.h_dim, state_is_tuple=True)
cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob = config.input_keep_prob)
(encoder_outputs, encoder_state) = tf.nn.bidirectional_dynamic_rnn(cell_fw,
cell_bw,
inputs=self.encoder_inputs_embedded,
sequence_length=self.x_length,
dtype=tf.float32,
scope='enc')
# Join outputs since we are using a bidirectional RNN
encoder_outputs = tf.concat(encoder_outputs, 2)
if isinstance(encoder_state[0], LSTMStateTuple):
encoder_state_c = tf.concat(
(encoder_state[0].c, encoder_state[1].c), 1, name='bidirectional_concat_c')
encoder_state_h = tf.concat(
(encoder_state[0].h, encoder_state[1].h), 1, name='bidirectional_concat_h')
self.encoder_state = LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
self.decoder_hidden = self.h_dim*2
with tf.variable_scope("Decoder") as scope:
decoder_cell = LSTMCell(self.decoder_hidden, state_is_tuple=True)
helper = seq2seq.TrainingHelper(self.decoder_train_inputs_embedded, self.decoder_train_length)
# Try schduled training helper. It may increase performance.
decoder = seq2seq.BasicDecoder(
cell=decoder_cell,
helper=helper,
initial_state=self.encoder_state
)
# Try AttentionDecoder.
self.decoder_outputs_train, self.decoder_state_train = seq2seq.dynamic_decode(
decoder,
impute_finished=True,
scope=scope,
)
self.decoder_logits = self.decoder_outputs_train.rnn_output
w_t = tf.get_variable("proj_w", [self.vocab_size, self.decoder_hidden], dtype=tf.float32)
w = tf.transpose(w_t)
b = tf.get_variable("proj_b", [self.vocab_size], dtype=tf.float32)
self.output_projection = (w, b)
m = tf.matmul(tf.reshape(self.decoder_logits, [-1, self.decoder_hidden]), w)
self.decoder_prediction_train = tf.argmax(
tf.reshape(m, [N, -1, self.vocab_size]) + self.output_projection[1],
axis=-1,
name='decoder_prediction_train')
def sampled_loss(labels, inputs):
labels = tf.reshape(labels, [-1, 1])
# We need to compute the sampled_softmax_loss using 32bit floats to
# avoid numerical instabilities.
local_w_t = tf.cast(tf.transpose(self.output_projection[0]), tf.float32)
local_b = tf.cast(self.output_projection[1], tf.float32)
local_inputs = tf.cast(inputs, tf.float32)
return tf.cast(
tf.nn.sampled_softmax_loss(
weights=local_w_t,
biases=local_b,
labels=labels,
inputs=local_inputs,
num_sampled=self.vocab_size // 10,
num_classes=self.vocab_size),
tf.float32)
loss = seq2seq.sequence_loss(
logits=self.decoder_logits,
targets=self.decoder_train_targets,
weights=tf.sequence_mask(self.x_length, tf.shape(self.x)[1], dtype=tf.float32, name='masks'),
softmax_loss_function = sampled_loss,
name='loss'
)
return encoder_outputs, loss
def _build_loss(self):
config = self.config
JX = tf.shape(self.x)[1]
# self.z: [N, 3]
losses = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.z))
tf.add_to_collection('losses', losses)
self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.add_to_collection('ema/scalar', self.loss)
def _build_ema(self):
self.ema = tf.train.ExponentialMovingAverage(self.config.decay)
ema = self.ema
tensors = tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/vector", scope=self.scope)
ema_op = ema.apply(tensors)
for var in tf.get_collection("ema/scalar", scope=self.scope):
ema_var = ema.average(var)
print('opname:', ema_var.op.name)
print('var:', ema_var)
tf.summary.scalar(ema_var.op.name, ema_var)
for var in tf.get_collection("ema/vector", scope=self.scope):
ema_var = ema.average(var)
tf.summary.histogram(ema_var.op.name, ema_var)
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def _build_var_ema(self):
self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay)
ema = self.var_ema
ema_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([ema_op]):
self.loss = tf.identity(self.loss)
def get_loss(self):
return self.loss
def get_global_step(self):
return self.global_step
def get_var_list(self):
return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
assert isinstance(batch, DataSet)
config = self.config
N, JX, VW, VC, d, W = \
config.batch_size, config.max_sent_size, \
config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size
feed_dict = {}
if config.len_opt:
"""
Note that this optimization results in variable GPU RAM usage (i.e. can cause OOM in the middle of training.)
First test without len_opt and make sure no OOM, and use len_opt
"""
if sum(len(sent) for sent in batch.data['x_list']) == 0:
new_JX = 1
else:
new_JX = max(len(sent) for sent in batch.data['x_list'])
if sum(len(ques) for ques in batch.data['y_list']) == 0:
new_JY = 1
else:
new_JY = max(len(ques) for ques in batch.data['y_list'])
JX = min(JX, max(new_JX, new_JY))
x = np.zeros([N, JX], dtype='int32')
cx = np.zeros([N, JX, W], dtype='int32')
x_mask = np.zeros([N, JX], dtype='bool')
x_length = np.zeros([N], dtype='int32')
y = np.zeros([N, JX], dtype='int32')
cy = np.zeros([N, JX, W], dtype='int32')
y_mask = np.zeros([N, JX], dtype='bool')
y_length = np.zeros([N], dtype='int32')
z = np.zeros([N, 3], dtype='float32')
feed_dict[self.x] = x
feed_dict[self.x_mask] = x_mask
feed_dict[self.x_length] = x_length
feed_dict[self.cx] = cx
feed_dict[self.y] = y
feed_dict[self.cy] = cy
feed_dict[self.y_mask] = y_mask
feed_dict[self.y_length] = y_length
feed_dict[self.z] = z
feed_dict[self.is_train] = is_train
if config.use_glove_for_unk:
feed_dict[self.new_emb_mat] = batch.shared['new_emb_mat']
X = batch.data['x_list']
CX = batch.data['cx_list']
Z = batch.data['z_list']
for i, zi in enumerate(Z):
z[i] = zi
def _get_word(word):
d = batch.shared['word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d:
return d[each]
if config.use_glove_for_unk:
d2 = batch.shared['new_word2idx']
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in d2:
return d2[each] + len(d)  # offset into the rows appended from new_emb_mat (see the word_emb_mat concat above)
address must have a unique identifier.",
required=True,
),
"location_id": pa.Column(
dtype="string",
description="The identifier of the location for which this is the address.",
required=False,
nullable=True,
),
"attention": pa.Column(
dtype="string",
description="The person or entity whose attention should be sought at the location (Often included as 'care of' component of an address.)",
required=False,
nullable=True,
),
"address_1": pa.Column(
dtype="string",
description="The first line(s) of the address, including office, building number and street.",
required=False,
nullable=True,
),
"address_2": pa.Column(
dtype="string",
description="(Deprecated) A second (additional) line of address information. (This field is deprecated: we recommend including all address information before 'city' as a comma separated list in address_1. There is no guarantee that systems will read this line of address information.)",
required=False,
nullable=True,
),
"address_3": pa.Column(
dtype="string",
description="(Deprecated) A third (additional) line of address information. (This field is deprecated: we recommend including all address information before 'city' as a comma separated list in address_1. There is no guarantee that systems will read this line of address information.)",
required=False,
nullable=True,
),
"address_4": pa.Column(
dtype="string",
description="(Deprecated) The fourth (additional) line of address information. (This field is deprecated: we recommend including all address information before 'city' as a comma separated list in address_1. There is no guarantee that systems will read this line of address information.)",
required=False,
nullable=True,
),
"city": pa.Column(
dtype="string",
description="The city in which the address is located.",
required=False,
nullable=True,
),
"region": pa.Column(
dtype="string",
description="The region in which the address is located (optional).",
required=False,
nullable=True,
),
"state_province": pa.Column(
dtype="string",
description="The state or province in which the address is located.",
required=False,
nullable=True,
),
"postal_code": pa.Column(
dtype="string",
description="The postal code for the address.",
required=False,
nullable=True,
),
"country": pa.Column(
dtype="string",
description="The country in which the address is located. This should be given as an ISO 3361-1 country code (two letter abbreviation).",
required=True,
),
}
)
hsds_postal_address_schema = pa.DataFrameSchema(
{
"id": pa.Column(
dtype="string",
description="Each postal address must have a unique identifier",
required=True,
),
"location_id": pa.Column(
dtype="string",
description="The identifier of the location for which this is the postal address.",
required=False,
nullable=True,
),
"attention": pa.Column(
dtype="string",
description="The person or entity whose attention should be sought at the location (Often included as 'care of' component of an address.)",
required=False,
nullable=True,
),
"address_1": pa.Column(
dtype="string",
description="The first line(s) of the address, including office, building number and street.",
required=True,
),
"address_2": pa.Column(
dtype="string",
description="(Deprecated) A second (additional) line of address information. (This field is deprecated: we recommend including all address information before 'city' as a comma separated list in address_1. There is no guarantee that systems will read this line of address information.)",
required=False,
nullable=True,
),
"address_3": pa.Column(
dtype="string",
description="(Deprecated) A third (additional) line of address information. (This field is deprecated: we recommend including all address information before 'city' as a comma separated list in address_1. There is no guarantee that systems will read this line of address information.)",
required=False,
nullable=True,
),
"address_4": pa.Column(
dtype="string",
description="(Deprecated) The fourth (additional) line of address information. (This field is deprecated: we recommend including all address information before 'city' as a comma separated list in address_1. There is no guarantee that systems will read this line of address information.)",
required=False,
nullable=True,
),
"city": pa.Column(
dtype="string",
description="The city in which the address is located.",
required=True,
),
"region": pa.Column(
dtype="string",
description="The region in which the address is located (optional).",
required=False,
nullable=True,
),
"state_province": pa.Column(
dtype="string",
description="The state or province in which the address is located.",
required=True,
),
"postal_code": pa.Column(
dtype="string",
description="The postal code for the address.",
required=True,
),
"country": pa.Column(
dtype="string",
description="The country in which the address is located. This should be given as an ISO 3361-1 country code (two letter abbreviation)",
required=True,
),
}
)
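# A minimal validation sketch for one of these schemas (assumes pandas is imported as pd
# elsewhere; the column values are made up, and exact dtype coercion depends on the pandera
# version in use):
#
#     df = pd.DataFrame({
#         "id": ["1"],
#         "address_1": ["123 Main St"],
#         "city": ["Springfield"],
#         "state_province": ["IL"],
#         "postal_code": ["62701"],
#         "country": ["US"],
#     }).astype("string")
#     hsds_postal_address_schema.validate(df)  # raises pandera.errors.SchemaError on failure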
hsds_regular_schedule_schema = pa.DataFrameSchema(
{
"id": pa.Column(
dtype="string",
description="Each entry must have a unique identifier",
required=True,
),
"service_id": pa.Column(
dtype="string",
description="The identifier of the service for which this is the regular schedule",
required=False,
nullable=True,
),
"location_id": pa.Column(
dtype="string",
description="The identifier of the location for which this is the regular schedule",
required=False,
nullable=True,
),
"service_at_location_id": pa.Column(
dtype="string",
description="The identifier of the 'service at location' table entry, when this schedule is specific to a service in a particular location.",
required=False,
nullable=True,
),
"weekday": pa.Column(
dtype="string",
description="The day of the week that this entry relates to",
required=False,
nullable=True,
),
"opens_at": pa.Column(
dtype="string",
description="The time when a service or location opens. This should use HH:MM format and should include timezone information, either adding the suffix 'Z' when the date is in UTC, or including an offset from UTC (e.g. 09:00-05:00 for 9am East Coast Time. ",
required=False,
nullable=True,
),
"closes_at": pa.Column(
dtype="string",
description="The time when a service or location opens. This should use HH:MM format and should include timezone information, either adding the suffix 'Z' when the date is in UTC, or including an offset from UTC (e.g. 09:00-05:00 for 9am East Coast Time.",
required=False,
nullable=True,
),
}
)
hsds_holiday_schedule_schema = pa.DataFrameSchema(
{
"id": pa.Column(
dtype="string",
description="Each entry must have a unique identifier",
required=True,
),
"service_id": pa.Column(
dtype="string",
description="The identifier of the service for which this is the holiday schedule",
required=False,
nullable=True,
),
"location_id": pa.Column(
dtype="string",
description="The identifier of the location for which this is the holiday schedule",
required=False,
nullable=True,
),
"service_at_location_id": pa.Column(
dtype="string",
description="The identifier of the 'service at location' table entry, when this schedule is specific to a service in a particular location.",
required=False,
nullable=True,
),
"closed": pa.Column(
dtype="boolean",
description="Indicates if a service or location is closed during a public holiday",
required=True,
),
"opens_at": pa.Column(
dtype="time",
description="The time when a service or location opens. This should use HH:MM format and should include timezone information, either adding the suffix 'Z' when the date is in UTC, or including an offset from UTC (e.g. 09:00-05:00 for 9am East Coast Time.",
required=False,
nullable=True,
),
"closes_at": pa.Column(
dtype="time",
description="The time when a service or location closes. This should use HH:MM format and should include timezone information, either adding the suffix 'Z' when the date is in UTC, or including an offset from UTC (e.g. 09:00-05:00 for 9am East Coast Time.",
required=False,
nullable=True,
),
"start_date": pa.Column(
dtype="datetime64",
description="The first day that a service or location is closed during a public or private holiday",
required=True,
),
"end_date": pa.Column(
dtype="datetime64",
description="The last day that a service or location is closed during a public or private holiday",
required=True,
),
}
)
hsds_funding_schema = pa.DataFrameSchema(
{
"id": pa.Column(
dtype="string",
description="Each entry must have a unique identifier",
required=True,
),
"organization_id": pa.Column(
dtype="string",
description="The identifier of the organization in receipt of this funding.",
required=False,
nullable=True,
),
"service_id": pa.Column(
dtype="string",
description="The identifier of the service in receipt of this funding",
required=False,
nullable=True,
),
"source": pa.Column(
dtype="string",
description="A free text description of the source of funds for this organization or service.",
required=False,
nullable=True,
),
}
)
hsds_eligibility_schema = pa.DataFrameSchema(
{
"id": pa.Column(
dtype="string",
description="Each entry must have a unique identifier",
required=True,
),
"service_id": pa.Column(
dtype="string",
description="The identifier of the service for which this entry describes the eligibility criteria",
required=False,
nullable=True,
),
"eligibility": pa.Column(
dtype="string",
description="The rules or guidelines that determine who can receive the service.",
required=False,
nullable=True,
),
}
)
hsds_service_area_schema = pa.DataFrameSchema(
{
"id": pa.Column(
dtype="string",
description="Each service area must have a unique identifier",
required=True,
),
"service_id": pa.Column(
dtype="string",
description="The identifier of the service for which this entry describes the service area",
required=False,
nullable=True,
),
"service_area": pa.Column(
dtype="string",
description="The geographic area where a service is available. This is a free-text description, and so may be precise or indefinite as necessary.",
required=False,
nullable=True,
),
"description": pa.Column(
dtype="string",
description="A more detailed description of this service area. Used to provide any additional information that cannot be communicated using the structured area and geometry fields.",
required=False,
nullable=True,
),
}
)
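# Illustrative usage sketch (not part of the original schema definitions): any of the
# DataFrameSchema objects above validates a pandas DataFrame through pandera's standard
# validate() call. The dataframe contents below are made-up placeholder values.
def _example_validate_service_area():
    import pandas as pd
    df = pd.DataFrame(
        {
            "id": pd.Series(["sa-1"], dtype="string"),
            "service_id": pd.Series(["svc-1"], dtype="string"),
            "service_area": pd.Series(["Greater Springfield"], dtype="string"),
            "description": pd.Series([None], dtype="string"),
        }
    )
    # Raises a pandera SchemaError if the dataframe violates the schema.
    return hsds_service_area_schema.validate(df)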
hsds_required_document_schema = pa.DataFrameSchema(
{
"id": pa.Column(
dtype="string",
description="Each document must have a unique identifier",
required=True,
),
"service_id": pa.Column(
dtype="string",
description="The identifier of the service for which this entry describes the required document",
required=False,
nullable=True,
),
"document": pa.Column(
dtype="string",
description="The document required to apply for or receive the service. e.g. 'Government-issued ID', | |
# Alexhuszagh/XLDiscoverer: xldlib/export/dataframes/base.py
'''
Export/Dataframes/base
______________________
Inheritable base-class for dataframe instances.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules/submodules
import itertools as it
import operator as op
from collections import defaultdict, OrderedDict
import numpy as np
import six
from xldlib.definitions import re
from xldlib.objects.abstract.dataframe import DataFrameDict
from xldlib.resources.parameters import column_defs, defaults, reports
from xldlib.utils import decorators, logger, xictools
__all__ = [
'Amplitudes',
'Dataframe',
'HierarchicalDataframe',
'QuantitativeDataframe'
]
# CONSTANTS
# ---------
LOWER_SIGMA = u'\u03C3'
UPPER_SIGMA = u'\u03A3'
# REGEXES
# -------
NONQUANTIFIED = re.compile(u'<|>|-|{}'.format(xictools.INFINITY), re.UNICODE)
# DATA
# ----
CONCATENATED = {
'report',
'best_peptide',
'best_peptide_file',
}
POLYPEPTIDE = {
reports.LINKTYPES['interlink'],
reports.LINKTYPES['multilink'],
}
MONOPEPTIDE = {
reports.LINKTYPES['intralink'],
reports.LINKTYPES['deadend'],
reports.LINKTYPES['single'],
}
SORT_COLUMNS = [
'Search Name',
'Cross-Linker',
'Precursor Scan',
'Precursor RT',
'Product Scan',
'Product RT'
]
QUANTITATIVE_SORT_COLUMNS = [(' ', i) for i in SORT_COLUMNS]
# TEMPLATES
# ---------
POLYPEPTIDE_TEMPLATE = OrderedDict([
('intersubunit', 'Intersubunit {}'),
('intrasubunit', 'Intrasubunit {}'),
('greylist_intersubunit', 'Greylist Intersubunit {}'),
('greylist_intrasubunit', 'Greylist Intrasubunit {}'),
])
MONOPEPTIDE_TEMPLATE = OrderedDict([
('standard', 'Standard {}'),
('greylist', 'Greylist {}'),
])
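# Note (added for clarity): sheets listed in CONCATENATED are assembled from several
# Subdataframe sections (standard/greylist for monopeptides, inter-/intra-subunit and
# their greylist variants for polypeptides). set_subdataframes() below builds them from
# these templates, and _concat() later stitches them back into a single report, inserting
# each formatted title between sections.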
# OBJECTS
# -------
class Subdataframe(DataFrameDict):
'''Subdataframe object concatenated into the report'''
def __init__(self, columns, sheet=None, title=None):
super(Subdataframe, self).__init__(columns=columns)
if sheet is not None:
self.title = title.format(sheet.title)
else:
self.title = None
@logger.init('spreadsheet', level='DEBUG')
class Dataframe(DataFrameDict):
'''Inheritable dataframe for shared methods'''
def __init__(self, sheet, columns=None):
super(Dataframe, self).__init__()
self.sheet = sheet
if columns is None:
columns = column_defs.COLUMNS[sheet.name]
self.columns = columns
# SETTERS
def set_named_columns(self, columns):
self.set_columns(self.getnamedcolumns(columns))
def set_default_columns(self, dimensions=1):
'''Sets the default columns in case no crosslinks identified'''
self.dimensions = dimensions
self.set_named_columns(self.columns.getordered(dimensions))
def set_dimensions(self, crosslinks):
'''Sets the dimensions to the maximum crosslink dimension among the crosslinks'''
previous = getattr(self, "dimensions", -1)
dimensions = max(len(i.crosslink.index) for i in crosslinks)
if dimensions > previous:
self.dimensions = dimensions
# for dependent dataframes
if hasattr(self, "resize"):
self.resize(previous)
def set_subdataframes(self):
'''Sets the temporary dataframe holder'''
self.dataframes = OrderedDict()
if self.sheet.name in CONCATENATED:
if self.sheet.linktype in POLYPEPTIDE:
for key, title in POLYPEPTIDE_TEMPLATE.items():
dataframe = Subdataframe(self.keys(), self.sheet, title)
self.dataframes[key] = dataframe
elif self.sheet.linktype in MONOPEPTIDE:
for key, title in MONOPEPTIDE_TEMPLATE.items():
dataframe = Subdataframe(self.keys(), self.sheet, title)
self.dataframes[key] = dataframe
else:
self.dataframes['report'] = Subdataframe(self.keys())
def set_version(self):
self.set_header()
self.set_value()
def set_linkagecounts(self, row, linkage):
'''Adds the linkage counts to the current row'''
if self.sheet.name == 'quantitative':
self.__set_linkagecount(row, (' ', 'Counts Info'), linkage.count)
self.__set_linkagecount(row, (' ', 'Counts Unique Info'), 1)
else:
self.__set_linkagecount(row, 'Counts Info', linkage.count)
self.__set_linkagecount(row, 'Counts Unique Info', 1)
def __set_linkagecount(self, row, column, count):
'''Sets the counts for a given column'''
if column in self:
row[column] = count
else:
for index in range(self.dimensions):
newcolumn = self.getcolumn(column, index)
row[newcolumn] = count
# GETTERS
def getcolumn(self, column, index):
'''Returns the suffixed column'''
suffix = self.columns.getsuffix(index)
if isinstance(column, six.string_types):
return column + suffix
else:
return column[0], column[1] + suffix
def getnamedcolumns(self, columns):
'''Returns the named spectral columns from a column list'''
name = self.matched.reporterion.name
if isinstance(columns, list):
return [i.format(reporterion=name) for i in columns]
elif isinstance(columns, dict):
return {k.format(reporterion=name): v.format(reporterion=name)
for k, v in columns.items()}
def __getsort(self, columns=SORT_COLUMNS, order=True):
'''Returns the columns to sort by'''
for name in columns:
if name in self:
yield (name, order)
else:
for index in range(self.dimensions):
newname = self.getcolumn(name, index)
if newname in self:
yield (newname, order)
break
# HELPERS
def _concat(self):
'''Concatenates the subdataframes into the current one'''
for index, dataframe in enumerate(self.dataframes.values()):
if dataframe.title is not None:
self.set_value(value=dataframe.title)
elif index:
self.set_value()
self.concat(dataframe)
self.set_value()
@logger.except_error(ValueError)
def _sort(self, sort=None, **kwds):
'''Sorts the subdataframes, ignoring the header'''
if sort is None:
sort = self.__getsort(**kwds)
columns, order = zip(*sort)
for dataframe in self.dataframes.values():
if dataframe and next(iter(dataframe.values())):
dataframe.sort(columns=columns, ascending=order)
def _rename(self, dimensions):
'''Renames the dataframe columns using the custom names for the given dimensions'''
rename = self.columns.getrename(dimensions)
self.rename(rename)
def _resize(self, previous):
'''Resizes the dataframe dimensions for all the suffixed keys'''
rename = self.columns.getresize(previous)
renamed = self.getnamedcolumns(rename)
columns = self.columns.getordered(self.dimensions)
named = self.getnamedcolumns(columns)
for dataframe in [self] + list(self.dataframes.values()):
dataframe.rename(renamed)
length = dataframe.get_last_index()
dataframe.set_columns(named, length)
dataframe._change_root(named)
@staticmethod
def _valuechecker(value, index=0):
'''Normalizes the value for data export'''
if isinstance(value, (list, tuple)):
return value[index]
return value
@logger.init('spreadsheet', level='DEBUG')
class MatchedDataframe(Dataframe):
'''Definitions for a base dataframe with a matched object'''
def __init__(self, matched, *args, **kwds):
super(MatchedDataframe, self).__init__(*args, **kwds)
self.matched = matched
# GETTERS
def get_dataframe(self, data, linkage):
'''Returns the temporary dataframe holder'''
length = len(self.dataframes)
if length == 1:
return self.dataframes['report']
elif length == 2 and linkage.greylist:
return self.dataframes['greylist']
elif length == 2:
return self.dataframes['standard']
elif length == 4 and linkage.greylist and linkage.intrasubunit:
return self.dataframes['greylist_intrasubunit']
elif length == 4 and linkage.greylist:
return self.dataframes['greylist_intersubunit']
elif length == 4 and linkage.intrasubunit:
return self.dataframes['intrasubunit']
else:
return self.dataframes['intersubunit']
def get_dataframe_row(self, data, crosslink):
'''Processes the data into a singular row'''
key = self.__get_spreadsheetkey(crosslink)
spreadsheet = data['spreadsheet'][key][crosslink.index]
row = {}
for column, values in spreadsheet.items():
if column in self:
row[column] = self._valuechecker(values)
elif isinstance(values, (tuple, list)):
for index, value in enumerate(values):
newcolumn = self.getcolumn(column, index)
row[newcolumn] = self._valuechecker(values, index)
return row
def __get_spreadsheetkey(self, crosslink):
'''Returns the spreadsheet key, dependent on the sheet type'''
if type(crosslink).__name__ == 'Quantitative':
return 'labeled'
elif type(crosslink.crosslink).__name__ == 'Single':
return 'singles'
else:
return 'crosslinks'
# HELPERS
def _append(self, crosslink, linkage):
'''Appends a new row to the dataframe for standard-like reports'''
data = self.matched[crosslink.row]
dataframe = self.get_dataframe(data, linkage)
index = dataframe.get_last_index()
row = self.get_dataframe_row(data, crosslink)
if linkage not in self.seen:
self.set_linkagecounts(row, linkage)
self.seen.add(linkage)
dataframe.loc[index] = row
# HIERARCHICAL
# ------------
ROW_TOTALS = (
'Total Sum',
'Total Count',
'Total Interactions'
)
class HierarchicalDataframe(Dataframe):
'''Definitions for dataframes with hierarchical headers'''
# COLUMNS
def get2dfileheaders(self, linkages):
'''Returns the current file header from the matched data'''
filenames = sorted({i.file for i in linkages})
crosslinkers = sorted({i.crosslinker for i in linkages})
columns = [(j, ' '*i) for i, j in enumerate(self.ordered)]
if self.columns.other['order'] == 'File':
for filename in filenames:
columns.extend((filename, i) for i in crosslinkers)
else:
for crosslinker in crosslinkers:
columns.extend((crosslinker, i) for i in filenames)
if getattr(self, "quantitative", False):
for meth in ('getrationame', 'geterrorname'):
columns.append((' ', ''))
columns.extend((i, getattr(self, meth)()) for i in filenames)
return columns
# COUNTS
def setlinkagecounts(self, counts, index_memo):
'''Stores the linkage counts for each file'''
count_column = self.get_column(2)
sum_column = self.get_column(3)
for key, index in index_memo.items():
self.loc[index, count_column] = counts.get_counts(key)
self.loc[index, sum_column] = counts.get_sum(key)
def setfilecounts(self, counts, headers=False, set_defaults=True):
'''Stores the counts for each individual file and crosslinker'''
index = self.get_last_index()
firstcolumn = self.get_column()
for offset, title in enumerate(ROW_TOTALS):
self.loc[index+offset, firstcolumn] = title
if headers:
self.__setheadercounts(counts, index, headers)
else:
self.__setcounts(counts, index)
if set_defaults:
self.__setdefaults(index, counts)
self.__settotals(index, counts)
def __setheadercounts(self, counts, index, headers):
'''Counts setter for ratio table objects'''
for name in headers:
for main in counts:
key = (main, self.upper_sigma(name))
self.loc[index, key] = counts.get_sum(main, name)
self.loc[index+1, key] = counts.get_counts(main, name)
self.loc[index+2, key] = counts.get_interactions(main, name)
def __setcounts(self, counts, index):
'''Count setter for single integer items as counts values'''
for column in counts:
self.loc[index, column] = counts.get_sum(column)
self.loc[index+1, column] = counts.get_counts(column)
self.loc[index+2, column] = counts.get_interactions(column)
def __setdefaults(self, index, counts, default=0):
'''Sets the default totals for each column'''
missing = self.getmissing(counts)
for column in missing:
for offset in range(len(ROW_TOTALS)):
self.loc[index+offset, column] = default
def __settotals(self, index, counts):
'''Adds the total sums for the current linkage type'''
column = self.get_column(1)
self.loc[index, column] = counts.get_totalsum()
self.loc[index+1, column] = counts.get_totalcounts()
self.loc[index+2, column] = counts.get_totalinteractions()
def getmissing(self, counts):
'''Returns the columns with no crosslinks identified'''
length = len(self.ordered)
missing = []
for index, column in enumerate(self):
if column == (' ', ''):
# reaching ratio/error counts
break
if index >= length and column not in counts:
missing.append(column)
return missing
# HELPERS
def upper_sigma(self, string):
return self.__format(string, UPPER_SIGMA)
def lower_sigma(self, string):
return self.__format(string, LOWER_SIGMA)
@staticmethod
def __format(string, character):
return u'{} ({})'.format(string, character)
# QUANTITATIVE
# ------------
# HELPERS
# -------
def getintegrated(spreadsheet, headers):
return tuple(xictools.IntegralData.fromspreadsheet(
spreadsheet, i) for i in headers)
# OBJECTS
# -------
class FileAmplitudes(defaultdict):
'''Definitions for linkage amplitudes within a file'''
def __init__(self, ratios, factory=set, *args, **kwds):
super(FileAmplitudes, self).__init__(factory, *args, **kwds)
self.range = 10**(defaults.DEFAULTS['intensity_filtering_range'])
self.ratios = ratios
def getmax(self):
return max(self.ratios(i) for i in self.integratedvalues())
def getmin(self):
'''Returns the minimum intensity threshold when filtering'''
return self.getmax() / self.range
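# Worked example (illustrative; the value 2 is hypothetical, the real default comes from
# defaults.DEFAULTS['intensity_filtering_range']): with a range setting of 2, self.range
# is 10**2 == 100, so getmin() returns getmax() / 100, i.e. amplitudes are kept within
# two orders of magnitude of the largest ratio value.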
def integratedvalues(self):
'''Generates an iterable over all the integrated values'''
for linkage, integrated in self.integrateditems():
yield integrated
def integrateditems(self):
'''Generate an iterable over all integrated items'''
for linkage, values in self.items():
for integrated in it.chain.from_iterable(values):
yield linkage, integrated
def labeleditems(self):
'''Generate an iterable over all labeled items'''
# Methods associated with pausing work on a task.
def requestPause(self): # Request the thread running the task to suspend its work.
self.pauseRequested.rise() # Raise the flag requesting the thread to suspend its work.
def _checkPauseReq(self): # Check for pause requests.
if self.pauseRequested(): # If someone is asking us to pause,
self._pause() # Then pause.
def _pause(self): # Announce that the thread has paused, and pause it.
self._verifyOwnership() # Only the owner thread of this task can do this.
self.paused.rise() # Raise the flag announcing that we've paused.
self.pauseRequested.waitDown() # Wait for the pauseRequested flag to go down.
self.paused.lower() # Lower the paused flag to announce that we've unpaused.
# Methods associated with aborting work on a task.
def requestAbort(self): # Request the thread running the task to abort its work.
self.abortRequested.rise() # Raise the flag requesting the thread to abort its work.
def _checkAbortReq(self): # Check for abort requests.
if self.abortRequested(): # If someone is asking us to abort,
self._abort() # Then abort.
def _abort(self): # Abort the running of this task.
self._verifyOwnership() # Only the owner thread of this task can do this.
raise WorkAborted("WorkItem._abort(): Aborting work on this work item.")
#-> Do it very simply - just raise a WorkAborted exception.
# This will be caught and trigger our _fail() method.
# Methods associated with failure to complete a task normally.
def _fail(self): # Announce that we failed to complete the task.
self._verifyOwnership() # Only the owner thread of this task can do this.
self.failed.rise() # Raise the flag announcing that the task failed.
self._end() # Announce the end of work on the task.
def waitFail(self): # Wait for work on this task to fail.
self.failed.wait() # Wait for the "failed" flag to be raised.
# Methods associated with exiting a task normally (but earlier than otherwise).
def requestExit(self): # Request the thread running the task to wrap things up as soon as it can.
self.exitRequested.rise() # Raise the flag requesting the thread to exit early if possible.
def _checkExitReq(self): # Check for exit requests.
if self.exitRequested(): # If someone is asking us to exit early,
self._exit() # Then exit.
def _exit(self): # Exit this task early (but normally).
self._verifyOwnership() # Only the owner thread of this task can do this.
raise EarlyCompletion("WorkItem._exit(): Exiting early from this task.")
#-> Simply raise an EarlyCompletion exception,
# which will be caught and trigger our _finish() method.
# Methods associated with successful completion of a task.
def _finish(self): # Announce that the task is finished.
self._verifyOwnership() # Only the owner thread of this task can do this.
self.done.rise() # Raise the flag announcing that the task is done.
self._end() # Announce the end of work on the task.
def waitFinish(self): # Wait for some thread to finish doing the task.
self.done.wait() # Wait for the "done" flag to be raised.
# Methods associating with the ending of a task (for any reason).
def _end(self): # Announce the end of work on the task.
self._verifyOwnership() # Only the owner thread of this task can do this.
self.stopped.rise() # Raise the flag announcing that we have stopped working on the task.
def waitStop(self): # Wait for work on the task to stop.
self.stopped.wait() # Wait for the "stopped" flag to be raised.
def wait(self): # Waiting for a work item generally means,
self.waitStop() # waiting for the work on it to stop.
# Caller: You may want to check the done() and/or failed() flags.
def _checkRequests(self): # Check for flags requesting us to do something.
self._verifyOwnership() # Only the owner thread of this task can do this.
self._checkAbortReq() # Abort requests are handled first - highest priority.
self._checkExitReq() # Next, check for exit requests - next highest priority.
self._checkPauseReq() # Finally, check for pause requests - lowest priority.
# Set the owner of a work item to a given thread (usually a Worker thread).
# This cannot be done after the task has already been started.
def setOwner(self, worker:Thread): # Set the owner of this WorkItem to the given (worker) thread.
with self.lock: # Thread-safely,
if self.started(): # If work on this task has already been started,
raise AlreadyStarted("WorkItem.setOwner(): Can't change owner "
"of this work item b/c work on it has "
"already started!")
#-> Cry foul.
else:
with self.owned.lock: # Do the following atomically in context of the owned flag.
self.owner = worker # Set the owner to the given worker.
self.owned.rise() # Raise the owned flag to declare this task is owned.
# A work item is callable. The call method causes the calling thread to
# take ownership of the work item (if it does not already have it).
# Once the task has started, ownership of it may not be reassigned to
# a new thread.
def __call__(self): # To do a work item,
with self.lock: # Thread-safely for this work item,
self._verifyOwnership() # Make sure we're the owner (only the owner thread of this task can do this).
if self.task == None: # If our callable attribute is null,
raise NullCallable("WorkItem.__call__(): Can't perform this "
"work item because its .task attribute is "
"null (None).")
# complain about that.
elif self.started(): # If some other thread has already started doing this task,
raise AlreadyStarted("WorkItem.__call__(): Can't perform this "
"work item because someone has already "
"started working on it!")
# Raise an exception to warn user about that.
else: # Otherwise,
try: # We'll try doing the task.
self._start() # First, announce that we're starting to work on the task.
self._checkAbortReq() # Go ahead and check for any early abort requests.
self._checkPauseReq() # Go ahead and check for any early pause requests.
try:
# print("worklist.WorkItem.__call__(): I'm trying to call this task: ", self.task)
self.result = self.task.__call__() # Then, actually do the task (it must be callable).
self.haveResult.rise() # Announce that we have a result.
except BaseException as e: # If it raises any kind of exception whatsoever,
self.exception = e # Remember what exception it was,
self.haveResult.rise() # Announce that we have a result.
raise e # and re-raise it.
self._finish() # Finally, announce that we finished the task.
except (EarlyCompletion, ExitingByRequest): # If the task terminates by throwing an EarlyCompletion or ExitingByRequest exception, then
self._finish() # Announce that we finished the task in that case as well.
raise # And re-signal the early completion to our caller also.
except: # For all other exceptions,
self._fail() # Announce that we failed to complete the task.
raise # And re-raise the exception.
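# Usage sketch (illustrative only; the WorkItem constructor is defined elsewhere in this
# module, so the WorkItem(task=...) form below is an assumption based on the .task
# attribute used by __call__):
#
#   import threading
#
#   def cooperative_task():
#       for _ in range(1000):
#           item._checkRequests()          # honors abort/exit (raise) and pause requests
#           ...                            # do one small unit of work
#
#   item = WorkItem(task=cooperative_task) # assumed constructor signature
#   worker = threading.Thread(target=item) # a WorkItem is callable
#   item.setOwner(worker)                  # ownership must be assigned before starting
#   worker.start()
#   item.requestExit()                     # ask the owner thread to wrap up early
#   item.waitStop()                        # block until the 'stopped' flag is raised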
#---------------------------------------------------------------------------------
# Worker [module public class] - Generic base class for worker threads.
#
# This early declaration is so that the Worklist class can
# refer to a generic Worker type, even though our worker
# subclasses are not defined till later. This is overridden
# later by a more specific definition.
class Worker(ThreadActor): pass
#---------------------------------------------------------------------
# Worklist [module private class] - Generic base class for
# worklists of various specific types.
#
# (ProjectWorklist, AssignmentWorklist, InboxWorklist,
# DesktopWorklist) abbreviated (Worklist, Assignment, Inbox,
# Desk).
#
# DesktopWorklist - A very small Desque, normally limited to just
# 1 or a few items, representing work item(s) that a given
# worker is about to start work on (after finishing the
# current work item).
#
# The desktop is a Desque rather than a simple variable so
# that if a customer needs a task done urgently, he can put
# the item on top of the desk and it will be done as soon
# as the worker finishes his current task. For faster
# response, the customer could also request the worker to
# exit, abort, or suspend his current task - if it can be
# suspended it can be put back on the desk.
# modape/modis/smooth.py
"""
MODIS smooth HDF5 class.
This file contains the class representing a smoothed MODIS HDF5 file.
"""
# pylint: disable=import-error
from array import array
from datetime import datetime, timedelta
import logging
from pathlib import Path
import h5py
from modape.constants import TEMPINT_LABELS
from modape.exceptions import HDF5CreationError, HDF5WriteError
from modape.modis.io import HDF5Base
from modape.utils import DateHelper, fromjulian
from modape.whittaker import lag1corr, ws2d, ws2dp, ws2doptv, ws2doptvp # pylint: disable=no-name-in-module
import numpy as np
log = logging.getLogger(__name__)
class ModisSmoothH5(HDF5Base):
"""Class representing HDF5 file containing smoothed MODIS data.
A smooth HDF5 file is directly linked to its raw HDF5 counterpart. When running
the Whittaker smoother, data is read from the raw HDF5 file, smoothed and gapfilled
(and if requested a temporal interpolation is performed), the resulting data is then written
to the smooth HDF5 file.
"""
def __init__(self,
rawfile: str,
targetdir: str,
startdate: str = None,
tempint: int = None) -> None:
"""Initialize instance of `ModisSmoothH5` class.
To create an instance of `ModisSmoothH5`, a full path to
a `ModisRawH5` HDF5 file needs to be specified.
If the HDF5 file for the `ModisSmoothH5` already exists in the `targetdir`,
the file will be updated, otherwise created.
To perform temporal interpolation, a desired timestep
for a temporal grid has to be specified with `tempint`.
If `tempint` is 5 or 10, the grid is set to a specific default.
Otherwise, `startdate` can be used to define a default grid in
conjunction with `tempint`.
Args:
rawfile (str): Full path to raw HDF5 file.
targetdir (str): Target directory for smooth HDF5 file.
startdate (str): Start date for temporal interpolation (as julian date YYYYDDD).
tempint (int): timesteps for temporal interpolation.
Raises:
AssertionError: If specified raw HDF5 file does not exist.
"""
self.rawfile = Path(rawfile)
self.startdate = startdate
assert self.rawfile.exists(), f"Raw HDF5 file {self.rawfile} does not exist."
# Parse tempint to get flag for filename
if tempint is not None:
try:
txflag = TEMPINT_LABELS[int(tempint)]
except KeyError:
txflag = "c"
self.tinterpolate = True
self.temporalresolution = tempint
else:
txflag = "n"
self.tinterpolate = False
self.temporalresolution = None
# Filename for smoothed HDF5
rawfile_trunk = self.rawfile.name.split(".")
smoothfile_trunk = ".".join(
rawfile_trunk[:-2] + \
["tx"+txflag] + \
rawfile_trunk[-2:-1]
)
filename = f"{targetdir}/{smoothfile_trunk}.h5"
super().__init__(filename=filename)
def create(self):
"""Creates HDF5 file.
If the corresponding HDF5 is not found in the target directory,
it's created.
Raises:
HDF5CreationError: If creation of HDF5 file fails.
"""
# Try reading info from raw HDF5
#pylint: disable=R1721
with h5py.File(self.rawfile, "r") as h5f_raw:
dset = h5f_raw.get("data")
rawshape = dset.shape
rawchunks = dset.chunks
datatype = dset.dtype
compression = dset.compression
raw_dates_all = [x.decode() for x in h5f_raw.get("dates")[...]]
raw_attrs = {key:value for key, value in dset.attrs.items()}
if self.temporalresolution is None:
tempres = raw_attrs["temporalresolution"]
else:
tempres = self.temporalresolution
dates = DateHelper(rawdates=raw_dates_all,
rtres=int(raw_attrs["temporalresolution"]),
stres=int(tempres),
start=self.startdate)
dates_length = len(dates.target)
nrows = raw_attrs["RasterYSize"]
ncols = raw_attrs["RasterXSize"]
try:
with h5py.File(self.filename, "x", libver="latest") as h5f:
dset = h5f.create_dataset("data",
shape=(rawshape[0], dates_length),
dtype=datatype, maxshape=(rawshape[0], None),
chunks=rawchunks,
compression=compression,
fillvalue=raw_attrs["nodata"])
h5f.create_dataset("sgrid",
shape=(nrows*ncols,),
dtype="float32",
maxshape=(nrows*ncols,),
chunks=(rawchunks[0],),
compression=compression)
h5f.create_dataset("dates",
shape=(dates_length,),
maxshape=(None,),
dtype="S8",
compression=compression,
data=np.array(dates.target, dtype="S8"))
h5f.create_dataset("rawdates",
shape=(len(raw_dates_all),),
maxshape=(None,),
dtype="S8",
compression=compression,
data=np.array(raw_dates_all, dtype="S8"))
raw_attrs["temporalresolution"] = tempres
dset.attrs.update(raw_attrs)
self.exists = True
except Exception as _:
log.error("Error creating %s", str(self.filename))
raise HDF5CreationError(f"Error creating {str(self.filename)}!")
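# Usage sketch (illustrative, not from the original module; the paths and keyword
# values below are placeholders): the typical workflow is to create the smooth file
# once and then run the Whittaker filter on it.
#
#   smt = ModisSmoothH5(rawfile="/data/raw/MXD13A2.h18v04.006.VIM.h5",
#                       targetdir="/data/smooth", tempint=10)
#   if not smt.filename.exists():
#       smt.create()
#   smt.smooth(soptimize=True, nsmooth=64, nupdate=6)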
def smooth(self,
svalue: float = None,
p: float = None,
soptimize: bool = None,
srange: np.ndarray = None,
nsmooth: int = 0,
nupdate: int = 0,
) -> None:
"""Applies Whittaker smoother to the data.
This method reads raw data from the raw HDF5 and applies the Whittaker
filter according to the parameters supplied. The resulting filtered data
is then stored in the smooth HDF5 file.
The parameters relevant for determining which Whittaker
variant is applied are `svalue`, `soptimize` and `p`:
If the `soptimize` flag is `True`, the V-curve optimization is performed.
If in addition a `p` value is supplied, the asymmetric V-curve optimization is performed instead.
In both cases, the range of S values to optimize in can be supplied with `srange`.
If `soptimize` is `False` and a `svalue` is passed, smoothing using that fixed value (needs to be `log10` of S)
for each pixel is performed. If a `p` value is passed, asymmetric smoothing with the fixed value is performed instead.
If neither `svalue` nor `soptimize` is specified, a previously initialized S value for each pixel
will be read from the grid and used for the smoothing.
Using `nsmooth` and `nupdate`, the number or raw timesteps used for smoothing, and the number
of filtered timesteps updated in the smooth HDF5 target file, can be adjusted.
Args:
svalue (float): Log10 value of smoothing parameter S (for fixed smoothing).
p (float): P value for asymmetric smoothing.
soptimize (bool): Flag for V-curve optimization.
srange (np.ndarray): S-range for V-curve optimization.
nsmooth (int): Number of raw timesteps for smoothing.
nupdate (int): Number of smooth timesteps updated in file.
Raises:
AssertionError: If the smooth HDF5 file does not exist (`create` needs to be executed first).
ValueError: If `nsmooth` is smaller than `nupdate`.
ValueError: If `srange` is not specified as a numpy array with expected dimensions.
HDF5WriteError: If write to HDF5 file fails.
"""
assert self.filename.exists(), "File doesn't exist! Can't run smoother."
if (nsmooth != 0) and (nupdate != 0):
if nsmooth < nupdate:
raise ValueError("nsmooth must be bigger or equal (>=) to nupdate!")
if soptimize and srange is not None:
if not isinstance(srange, np.ndarray):
raise ValueError("srange needs to be supplied as numpy array")
log.info("Runnig smoother on %s", str(self.filename))
processing_starttime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
log.debug("Reading metadata from HDF5s")
with h5py.File(self.rawfile, "r") as h5f_open:
raw_ds = h5f_open.get("data")
raw_shape = raw_ds.shape
raw_chunks = raw_ds.chunks
raw_attrs = dict(raw_ds.attrs.items())
raw_dates_all = [x.decode() for x in h5f_open.get("dates")[...]]
raw_dates_nsmooth = raw_dates_all[-nsmooth:]
with h5py.File(self.filename, "r+") as h5f_open:
smt_ds = h5f_open.get("data")
smt_attrs = smt_ds.attrs
smt_shape = smt_ds.shape
smt_chunks = smt_ds.chunks
temporalresolution = smt_attrs["temporalresolution"]
tshift = int(smt_attrs["tshift"])
dates = DateHelper(rawdates=raw_dates_all,
rtres=int(raw_attrs["temporalresolution"]),
stres=int(temporalresolution),
start=self.startdate)
# Resize if date list is bigger than shape of smoothed data
if dates.target_length > smt_shape[1]:
log.debug("Resizing dataset! Current %s, required %s", smt_shape[1], dates.target_length)
smt_dates = h5f_open.get("dates")
smt_dates.resize((dates.target_length,))
smt_ds.resize((smt_shape[0], dates.target_length))
smt_dates[...] = np.array(dates.target, dtype="S8")
smt_shape = smt_ds.shape
nodata = raw_attrs["nodata"]
dix = dates.getDIX(nupdate)
arr_raw = np.zeros((raw_chunks[0], len(raw_dates_nsmooth)), dtype="double")
if nsmooth > 0:
read_offset = raw_shape[1] - nsmooth
else:
read_offset = 0
if nupdate > 0:
write_offset = smt_shape[1] - nupdate
else:
write_offset = 0
if self.tinterpolate:
log.debug("Temporal interpolation triggered!")
arr_smt = np.full((smt_chunks[0], len(dix)), fill_value=nodata, dtype="double")
vector_daily = dates.getDV(nodata)
# Shift for interpolation
for rdate in raw_dates_nsmooth:
rdate_shift = (fromjulian(rdate) + timedelta(tshift)).strftime("%Y%j")
dd_index = dates.daily.index(rdate_shift)
vector_daily[dd_index] = -1
else:
arr_smt = arr_raw
# use HDF5 base for reading rawdata
raw_h5 = HDF5Base(str(self.rawfile))
chunk_generator = raw_h5.read_chunked(
dataset="data",
xoffset=read_offset,
xchunk=10,
arr_out=arr_raw,
)
if soptimize or svalue is None:
sgrid_generator = self.read_chunked(dataset="sgrid")
else:
sgrid_generator = None
arr_sgrid = None
# counter
chunk_counter = 0
# iterate over chunks
log.debug("Iterating over chunks")
for arr_raw_chunk in chunk_generator:
log.debug("Chunk %s", chunk_counter)
# create weights
wts = (arr_raw_chunk != nodata)
wts = wts.astype("uint8")
ndix = np.sum(wts, 1) >= (arr_raw.shape[1] * 0.2) # 20%+ data
map_index = np.where(ndix)[0]
if sgrid_generator:
arr_sgrid = next(sgrid_generator)
for ix in map_index:
w = wts[ix, :].astype("double")
if soptimize:
if srange is None:
lag_correlation = lag1corr(arr_raw[ix, :-1], arr_raw[ix, 1:], nodata)
if lag_correlation > 0.5:
sr = np.arange(-2, 1.2, 0.2).round(2)
elif lag_correlation <= 0.5:
sr = np.arange(0, 3.2, 0.2).round(2)
else:
sr = np.arange(-1, 1.2, 0.2).round(2)
else:
sr = srange
if p is not None:
arr_raw[ix, :], arr_sgrid[ix] = ws2doptvp(y=arr_raw[ix, :],
w=w,
llas=array("d", sr),
p=p)
else:
arr_raw[ix, :], arr_sgrid[ix] = ws2doptv(y=arr_raw[ix, :],
w=w,
llas=array("d", sr))
else:
if svalue is None:
s = 10 ** arr_sgrid[ix]
else:
s = 10 ** svalue
if p is None:
arr_raw[ix, :] = ws2d(y=arr_raw[ix, :], lmda=s, w=w)
else:
arr_raw[ix, :] = ws2dp(y=arr_raw[ix, :], lmda=s, w=w, p=p)
if self.tinterpolate:
arr_smt[ix, :] = self._apply_tinterpolate(
z1=arr_raw[ix, :],
nodata=nodata,
vector_daily=vector_daily,
dix=dix,
)
arr_smt = np.rint(arr_smt, out=arr_smt)
write_check = self.write_chunk(
dataset="data",
arr_in=arr_smt,
xoffset=write_offset,
xchunk=10,
yoffset=chunk_counter*raw_chunks[0],
)
if not write_check:
msg = "Error writing to %s"
log.error(msg, self.filename)
raise HDF5WriteError(msg % self.filename)
if arr_sgrid is not None:
arr_sgrid[arr_sgrid > 0] = np.log10(arr_sgrid[arr_sgrid > 0])
write_check = self.write_chunk(
dataset="sgrid",
arr_in=arr_sgrid,
"sqlite>=3.9.0"])
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return only_on(["sqlite", "oracle"])
@property
def temporary_views(self):
"""target database supports temporary views"""
return only_on(["sqlite", "postgresql"])
@property
def update_nowait(self):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return skip_if(
["firebird", "mssql", "mysql", "sqlite", "sybase"],
"no FOR UPDATE NOWAIT support",
)
@property
def subqueries(self):
"""Target database must support subqueries."""
return skip_if(exclude("mysql", "<", (4, 1, 1)), "no subquery support")
@property
def ctes(self):
"""Target database supports CTEs"""
return only_on(
[
lambda config: against(config, "mysql")
and (
config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info
>= (10, 2)
),
"postgresql",
"mssql",
"oracle",
]
)
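# Usage sketch (an assumption about the surrounding test framework rather than something
# defined in this file): requirement properties such as `ctes` are typically consumed as
# decorators on test methods, e.g.
#
#   from sqlalchemy import testing
#
#   @testing.requires.ctes
#   def test_cte_roundtrip(self):
#       ...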
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return only_on(
[
"postgresql",
"mssql",
# "oracle" - oracle can do this but SQLAlchemy doesn't support
# their syntax yet
]
)
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return only_if(["postgresql"])
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return only_if(["mysql", "sqlite", "postgresql+psycopg2", "mssql"])
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for INTERSECT",
)
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for EXCEPT",
)
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
Fails on SQL Server
"""
return fails_if("mssql")
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite.
"""
return fails_if("sqlite")
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return fails_if(["sqlite", "oracle"])
@property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
return fails_if(["sybase"], "no support for OFFSET or equivalent")
@property
def sql_expression_limit_offset(self):
return (
fails_if(
["mysql"],
"Target backend can't accommodate full expressions in "
"OFFSET or LIMIT",
)
+ self.offset
)
@property
def window_functions(self):
return only_if(
["postgresql>=8.4", "mssql", "oracle", "sqlite>=3.25.0"],
"Backend does not support window functions",
)
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return skip_if(
[
no_support("firebird", "no SA implementation"),
no_support("mssql", "two-phase xact not supported by drivers"),
no_support(
"oracle", "two-phase xact not implemented in SQLA/oracle"
),
no_support(
"drizzle", "two-phase xact not supported by database"
),
no_support(
"sqlite", "two-phase xact not supported by database"
),
no_support(
"sybase", "two-phase xact not supported by drivers/SQLA"
),
no_support(
"postgresql+zxjdbc",
"FIXME: JDBC driver confuses the transaction state, "
"may need separate XA implementation",
),
no_support(
"mysql",
"recent MySQL communiity editions have too many issues "
"(late 2016), disabling for now",
),
]
)
@property
def two_phase_recovery(self):
return self.two_phase_transactions + (
skip_if("mysql", "crashes on most mariadb and mysql versions")
)
@property
def views(self):
"""Target database must support VIEWs."""
return skip_if("drizzle", "no VIEW support")
@property
def empty_strings_varchar(self):
"""
target database can persist/return an empty string with a varchar.
"""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return fails_if(
["oracle"],
"ORA-00932: inconsistent datatypes: expected - got CLOB",
)
@property
def unicode_data(self):
"""target drive must support unicode data stored in columns."""
return skip_if([no_support("sybase", "no unicode driver support")])
@property
def unicode_connections(self):
"""
Target driver must support some encoding of Unicode across the wire.
"""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if(
[exclude("mysql", "<", (4, 1, 1), "no unicode connection support")]
)
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return skip_if(
[
no_support("oracle", "FIXME: no support in database?"),
no_support("sybase", "FIXME: guessing, needs confirmation"),
no_support("mssql+pymssql", "no FreeTDS support"),
LambdaPredicate(
lambda config: against(config, "mysql+mysqlconnector")
and config.db.dialect._mysqlconnector_version_info > (2, 0)
and util.py2k,
"bug in mysqlconnector 2.0",
),
exclude(
"mysql", "<", (4, 1, 1), "no unicode connection support"
),
]
)
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes.
"""
return fails_on_everything_except(
"mysql", "sqlite+pysqlite", "sqlite+pysqlcipher", "sybase", "mssql"
)
@property
def implements_get_lastrowid(self):
return skip_if([no_support("sybase", "not supported by database")])
@property
def dbapi_lastrowid(self):
""""target backend includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return skip_if(
"mssql+pymssql", "crashes on pymssql"
) + fails_on_everything_except(
"mysql", "sqlite+pysqlite", "sqlite+pysqlcipher"
)
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return fails_on_everything_except(
"postgresql", "oracle", "firebird", "sqlite >= 3.30.0"
)
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return fails_on_everything_except(
"postgresql", "oracle", "mssql", "sybase", "sqlite"
)
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate"""
return skip_if(["mssql", "sqlite"])
@property
def array_type(self):
return only_on(
[
lambda config: against(config, "postgresql")
and not against(config, "+pg8000")
and not against(config, "+zxjdbc")
]
)
@property
def json_type(self):
return only_on(
[
lambda config: against(config, "mysql")
and (
(
not config.db.dialect._is_mariadb
and against(config, "mysql >= 5.7")
)
or (
config.db.dialect._mariadb_normalized_version_info
>= (10, 2, 7)
)
),
"postgresql >= 9.3",
self._sqlite_json,
]
)
@property
def json_index_supplementary_unicode_element(self):
# for sqlite see https://bugs.python.org/issue38749
return skip_if(
[
lambda config: against(config, "mysql")
and config.db.dialect._is_mariadb,
"sqlite",
]
)
def _sqlite_json(self, config):
if not against(config, "sqlite >= 3.9"):
return False
else:
with config.db.connect() as conn:
try:
return (
conn.scalar(
"""select json_extract('{"foo": "bar"}', """
"""'$."foo"')"""
)
== "bar"
)
except exc.DBAPIError:
return False
@property
def reflects_json_type(self):
return only_on(
[
lambda config: against(config, "mysql >= 5.7")
and not config.db.dialect._is_mariadb,
"postgresql >= 9.3",
"sqlite >= 3.9",
]
)
@property
def json_array_indexes(self):
return self.json_type + fails_if("+pg8000")
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return fails_on_everything_except("sqlite")
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return skip_if(
["mssql", "mysql", "firebird", "+zxjdbc", "oracle", "sybase"]
)
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return only_on(["oracle"])
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
# does not work as of pyodbc 4.0.22
return fails_on("mysql+mysqlconnector") + skip_if("mssql+pyodbc")
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return skip_if(["oracle"])
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return skip_if(
["mssql", "mysql", "firebird", "+zxjdbc", "oracle", "sybase"]
)
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
# NOTE: this exclusion isn't used in current tests.
return exclusions.open()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return fails_if(
[
(
"sybase+pyodbc",
None,
None,
"Don't know how do get these values through "
"FreeTDS + Sybase",
),
("firebird", None, None, "Precision must be from 1 to 18"),
]
#!/usr/bin/env python3
from __future__ import print_function
import os
from os import environ, chmod
import json
import shutil
import hashlib
import pathlib
import logging
from time import sleep
from types import GeneratorType
from collections import Counter, defaultdict
import psutil # sigh
import appdirs
import requests
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
# read environment variables # FIXME not the most modular...
__all__ = ['api_token', 'username', 'group', 'group_to_memfile',
'idFromShareLink', 'shareLinkFromId',
'AnnoFetcher', 'Memoizer',
'HypothesisUtils', 'HypothesisHelper', 'Annotation', 'HypAnnoId']
api_token = environ.get('HYP_API_TOKEN', 'TOKEN') # Hypothesis API token
username = environ.get('HYP_USERNAME', 'USERNAME') # Hypothesis username
group = environ.get('HYP_GROUP', '__world__')
ucd = appdirs.user_cache_dir()
class JEncode(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, tuple):
return list(obj)
elif isinstance(obj, HypothesisAnnotation):
return obj._row
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
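# Usage sketch (illustrative): the encoder above lets tuples and HypothesisAnnotation
# objects pass straight through json serialization, e.g.
#
#   payload = json.dumps({'ids': ('a', 'b'), 'anno': some_annotation}, cls=JEncode)
#
# where `some_annotation` is assumed to be a HypothesisAnnotation instance (its raw
# row dict is emitted via the default() hook above).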
def group_to_memfile(group, post=lambda group_hash:None):
if group != '__world__':
m = hashlib.sha256()
m.update(group.encode())
group_hash = m.hexdigest()
else:
group_hash = group
memfile = pathlib.Path(ucd, 'hyputils', f'annos-{group_hash}.json')
post(group_hash)
memfile.parent.mkdir(exist_ok=True, parents=True) # FIXME remove after orthauth switch
return memfile
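# For example (descriptive): group_to_memfile('somegroupid') yields a path of the form
# <user-cache-dir>/hyputils/annos-<sha256-of-group>.json, while the public '__world__'
# group keeps its literal name in the filename.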
def makeSimpleLogger(name, level=logging.INFO):
# TODO use extra ...
logger = logging.getLogger(name)
if logger.handlers: # prevent multiple handlers
return logger
logger.setLevel(level)
ch = logging.StreamHandler() # FileHander goes to disk
fmt = ('[%(asctime)s] - %(levelname)8s - '
'%(name)14s - '
'%(filename)16s:%(lineno)-4d - '
'%(message)s')
formatter = logging.Formatter(fmt)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
log = makeSimpleLogger('hyputils')
logd = log.getChild('data')
if 'CI' not in environ:
log.debug(' '.join((api_token, username, group))) # sanity check
# simple uri normalization
def norm(iri):
if '://' in iri:
_scheme, iri_norm = iri.split('://', 1)
if '?hypothesisAnnotationId=' in iri:
iri_norm, junk = iri.split('?hypothesisAnnotationId=', 1)
else:
iri_norm = iri # the creeping madness has co
return iri_norm
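# Traced behavior of norm() (descriptive comments, based on the branches above):
#   norm('https://example.org/page')
#       -> 'example.org/page' (scheme stripped)
#   norm('https://example.org/page?hypothesisAnnotationId=abc')
#       -> 'https://example.org/page' (the annotation-id query is dropped; note that the
#          second split runs on the original iri, so the scheme survives in this branch)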
# annotation retrieval and memoization
class NotOkError(Exception):
def __init__(self, message, request):
self.status_code = request.status_code
self.reason = request.reason
super().__init__(message)
class AnnoFetcher:
lsu_default = '1900-01-01T00:00:00.000000+00:00' # don't need, None is ok
def __init__(self, api_token=api_token, username=username, group=group,
**kwargs):
if api_token == 'TOKEN':
log.warning('\x1b[31mWARNING:\x1b[0m NO API TOKEN HAS BEEN SET!')
self.api_token = api_token
self.username = username
self.group = group
def __call__(self):
return self.get_annos()
def h(self):
return HypothesisUtils(username=self.username,
token=self.api_token,
group=self.group)
def yield_from_api(self,
search_after=None,
limit=None,
max_results=None,
stop_at=None):
# use stop at if you want to be evil and hit the api in parallel
log.info(f'fetching after {search_after}')
# hard code these to simplify assumptions
order = 'asc'
sort = 'updated'
h = self.h()
params = {'order':order,
'sort':sort,
'group':h.group}
if search_after:
params['search_after'] = search_after
if max_results is None and self.group == '__world__':
log.info(f'searching __world__ as {self.username} since max_results was not set')
params['user'] = self.username
if limit is not None:
params['limit'] = limit
for row in h.search_all(params, max_results=max_results, stop_at=stop_at):
yield row
def get_annos_from_api(self,
search_after=None,
limit=None,
max_results=None,
stop_at=None):
return [HypothesisAnnotation(r) for r in
self.yield_from_api(search_after=search_after,
limit=limit,
max_results=max_results,
stop_at=stop_at)]
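# Usage sketch (illustrative; the credential strings are placeholders, not real values):
# fetching a capped number of annotations for a group straight from the API looks like
#
#   fetcher = AnnoFetcher(api_token='MY-TOKEN', username='someuser', group='group123')
#   annos = fetcher.get_annos_from_api(max_results=200)
#
# In practice the Memoizer subclass further below is the usual entry point, since it also
# caches results to the per-group memoization file.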
class AnnoReader:
class GroupMismatchError(Exception):
pass
def __init__(self, memoization_file, group, *args, **kwargs):
self.group = group
if memoization_file is None:
memoization_file = group_to_memfile(group)
elif not isinstance(memoization_file, pathlib.Path):
memoization_file = pathlib.Path(memoization_file)
self.memoization_file = memoization_file
def __call__(self):
return self.get_annos()
def get_annos(self):
annos, last_sync_updated = self.get_annos_from_file()
return annos
def get_annos_from_file(self, file=None):
if file is None:
file = self.memoization_file
jblobs = []
last_sync_updated = None
if file is not None:
try:
with open(file, 'rt') as f:
jblobs_lsu = json.load(f)
try:
jblobs, last_sync_updated = jblobs_lsu
if not isinstance(last_sync_updated, str):
msg = ('We have probably hit the rare case where there '
'are exactly two annotations in a cache file.')
raise ValueError(msg)
except ValueError:
jblobs = jblobs_lsu
last_sync_updated = jblobs[-1]['updated']
if jblobs is None:
raise ValueError('wat')
except json.decoder.JSONDecodeError:
with open(file, 'rt') as f:
data = f.read()
if not data:
log.info('memoization file exists but is empty')
except FileNotFoundError:
log.info('memoization file does not exist')
annos = [HypothesisAnnotation(jb) for jb in jblobs]
self.check_group(annos)
return annos, last_sync_updated
def check_group(self, annos):
if annos:
group = annos[0].group
if self.group != group:
msg = f'Groups do not match! {self.group} {group}'
raise self.GroupMismatchError(msg)
class Memoizer(AnnoReader, AnnoFetcher): # TODO just use a database ...
def __init__(self, memoization_file=None,
api_token=api_token,
username=username,
group=group):
# SIGH
AnnoReader.__init__(self,
memoization_file=memoization_file,
group=group)
AnnoFetcher.__init__(self,
memoization_file=memoization_file,
api_token=api_token,
username=username,
group=group)
lock_name = '.lock-' + self.memoization_file.stem
self._lock_folder = self.memoization_file.parent / lock_name
def add_missing_annos(self, annos, last_sync_updated): # XXX deprecated
""" this modifies annos in place """
self.check_group(annos)
search_after = last_sync_updated
# start from last_sync_updated since the websocket is unreliable
new_annos = self._stream_annos_from_api(annos, search_after)
return annos
def update_annos_from_api(self,
annos,
helpers=tuple(),
start_after=None,
stop_at=None,
batch_size=2000):
self.check_group(annos)
if annos:
if start_after is not None:
raise TypeError('cannot have both non-empty annos and '
'not None start_after at the same time')
last_sync_updated = annos[-1].updated
search_after = last_sync_updated
new_annos = self._stream_annos_from_api(annos,
search_after,
stop_at,
batch_size,
helpers)
for anno in new_annos:
for Helper in helpers:
Helper(anno, annos)
return new_annos
def _stream_annos_from_api(self,
annos,
search_after,
stop_at=None,
batch_size=2000,
helpers=tuple()):
# BUT FIRST check to make sure that no one else is in the middle of fetching into our anno file
# YES THIS USES A LOCK FILE, SIGH
can_update = not self._lock_folder.exists()
if can_update:
# TODO in a multiprocess context streaming anno updates
# is a nightmare, even in this context if we call get_annos
# more than once there is a risk that only some processes
# will get the new annos, though I guess that is ok
# in the sense that they will go look for new annos starting
# wherever they happen to be and will double pull any annos
# that were previously pulled by another process in addition
# to any annoations that happend after the other process pulled
# the only inconsistency would be if an annoation was deleted
# since we already deal with the update case
self._lock_folder.mkdir()
new_annos = self._can_update(annos, search_after, stop_at, batch_size)
elif self._locking_process_dead():
if self._lock_pid_file.exists():
# folder might exist by itself with no lock-pid file
self._unlock_pid()
_search_after = self._lock_folder_lsu()
search_after = (search_after
if _search_after is None else
_search_after)
new_annos = self._can_update(annos, search_after, stop_at, batch_size)
else:
new_annos = self._cannot_update(annos)
return new_annos
def _can_update(self, annos, search_after, stop_at, batch_size):
""" only call this if we can update"""
try:
self._write_lock_pid()
gen = self.yield_from_api(search_after=search_after,
stop_at=stop_at)
try:
while True:
first = [next(gen)] # stop iteration breaks the loop
rest = [anno for i, anno in zip(range(batch_size - 1), gen)]
batch = first + rest
lsu = batch[-1]['updated']
file = self._lock_folder / lsu # FIXME windows
with open(file, 'wt') as f:
json.dump(batch, f)
except StopIteration:
pass
except:
raise
else: # I think this is the first time I've ever had to use this
new_annos = self._lock_folder_to_json(annos)
shutil.rmtree(self._lock_folder)
return new_annos
finally:
if self._lock_pid_file.exists():
self._unlock_pid()
def _cannot_update(self, annos):
# we have to block here until the annos are updated and the
# lock folder is removed so we can extend the current annos
while True:
sleep(1) # sigh
# FIXME in theory this wait could
# lead to some workers never waking up
# if calls to update from other workers
# happen frequently enough
if not self._lock_folder.exists():
break
all_annos, lsu = self.get_annos_from_file()
# this approach is safer than direct comparison of all_annos and annos
# because it makes it possible to detect duplicates from updates
new_annos = [a for a in all_annos if a.updated > lsu]
self._merge_new_annos(annos, new_annos)
# we don't need to memoize here
return new_annos
@property
def _lock_pid_file(self):
return self._lock_folder.parent / 'lock-pid'
def _write_lock_pid(self):
if self._lock_pid_file.exists():
raise FileExistsError(self._lock_pid_file)
p = psutil.Process()
data = f'{p.pid},{p._create_time}'
with open(self._lock_pid_file, 'wt') as f:
f.write(data)
@property
def _lock_pidinfo(self):
if self._lock_pid_file.exists():
with open(self._lock_pid_file, 'rt') as f:
data = f.read()
spid, screate_time = data.split(',')
pid = int(spid)
create_time = float(screate_time)
return pid, create_time
else:
return None, None
def _locking_process_dead(self):
pid, create_time = self._lock_pidinfo
if pid is None:
# pidinfo file doesn't exist so the lock folder
# is not handled by us
return True
if not psutil.pid_exists(pid):
return True
p = psutil.Process(pid)
return p.create_time() != create_time
def _unlock_pid(self):
self._lock_pid_file.unlink()
def _lock_folder_lsu(self):
paths = sorted(self._lock_folder.iterdir())
if paths:
last = paths[-1]
more_annos, last_sync_updated = self.get_annos_from_file(last)
return last_sync_updated
def _get_annos_from_folder(self):
new_annos = []
last_sync_updated = None
for jpath in sorted(self._lock_folder.iterdir()):
more_annos, last_sync_updated = self.get_annos_from_file(jpath)
new_annos.extend(more_annos)
return new_annos, last_sync_updated
def _lock_folder_to_json(self, annos):
new_annos, lsu = self._get_annos_from_folder()
if new_annos:
self._merge_new_annos(annos, new_annos)
self.memoize_annos(annos)
return new_annos
def _merge_new_annos(self, annos, new_annos):
new_ids = set(a.id for a in new_annos)
n_updated = 0
for anno in tuple(annos):
if anno.id in new_ids:
annos.remove(anno) # FIXME
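# A minimal standalone sketch (not part of the original module) of the
# stale-lock check used above: _write_lock_pid records "<pid>,<create_time>"
# in the lock-pid file, and the lock is treated as stale when that pid no
# longer exists or has been recycled by the OS. Only psutil is assumed.
import psutil


def lock_is_stale(lock_pid_file_text: str) -> bool:
    """Return True if the process recorded in the lock file is gone."""
    spid, screate_time = lock_pid_file_text.split(',')
    pid, create_time = int(spid), float(screate_time)
    if not psutil.pid_exists(pid):
        return True
    try:
        # pid_exists() can race with pid reuse, so also compare create times
        return psutil.Process(pid).create_time() != create_time
    except psutil.NoSuchProcess:
        return True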
# Repository: NonExistentUsername/explosive-kittens-bot
from os import makedirs
from notifications import Notifications
from chats_manager import ChatsManager
from notifications_processor import NotificationsProcessor
from games_manager import GamesManager
from commands import Commands
from messages import Messages
from string_generators import *
from telebot import types
from cards import *
class BotProcessor:
##########################################################
@property
def __chat_id(self):
if self.__message != None:
return self.__message.chat.id
elif self.__call != None:
return self.__call.message.chat.id
return None
@property
def __u(self):
if self.__message != None:
return self.__message.from_user
elif self.__call != None:
return self.__call.from_user
elif self.__inline_result != None:
return self.__inline_result.from_user
return None
@property
def __user_id(self):
if self.__u != None:
return self.__u.id
return None
##########################################################
def __create(self):
self.__gms_controller.create_game(self.__message.chat.id)
def __close(self):
self.__gms_controller.close_game(self.__message.chat.id)
def __join(self):
self.__gms_controller.join_game(self.__chat_id, self.__user_id)
def __leave(self):
self.__gms_controller.leave_game(self.__chat_id, self.__user_id)
def __leave_all(self):
self.__gms_controller.leave_all_games(self.__message.from_user.id)
def __play(self):
self.__gms_controller.start_game(self.__message.chat.id)
def __take_card(self):
self.__gms_controller.take_card(self.__user_id)
def __place_card(self):
self.__gms_controller.place_card(self.__user_id, self.__message.text[4:])
def __boom(self):
self.__gms_controller.boom(self.__user_id)
def __choose_player(self, data):
__user_id = int(data.split()[1])
self.__gms_controller.choose_player(self.__user_id, __user_id)
def __choose_card(self, data):
__card_type = int(data.split()[1])
self.__gms_controller.choose_card(self.__user_id, __card_type)
def __special2(self):
self.__gms_controller.special2(self.__user_id)
def __special3(self):
self.__gms_controller.special3(self.__user_id)
def __special5(self):
self.__gms_controller.special5(self.__user_id)
def __undo_special(self):
self.__gms_controller.undo_special(self.__user_id)
def __cards(self):
self.__gms_controller.get_deck(self.__user_id)
def __menu(self):
if self.__message != None:
if self.__message.chat.id in self.__menus:
self.__bot.reply_to(self.__menus[self.__message.chat.id], Messages.MENU)
else:
self.__game_not_started()
##########################################################
@staticmethod
def __generate_players_count(count):
return '🔷 Количество игроков: ' + str(count)
@staticmethod
def __generate_cards_count(count):
return '🔷 Количество карт в колоде: ' + str(count)
##########################################################
@staticmethod
def __generate_jl_markup(empty):
if empty:
return None
markup = types.InlineKeyboardMarkup(row_width=2)
__join = types.InlineKeyboardButton(text='Подключиться', callback_data='join')
__leave = types.InlineKeyboardButton(text='Выйти', callback_data='leave')
markup.add(__join, __leave)
return markup
@staticmethod
def __generate_deck_button_markup(empty):
if empty:
return None
markup = types.InlineKeyboardMarkup(row_width=2)
__deck = types.InlineKeyboardButton(text='Открыть колоду', switch_inline_query_current_chat='')
markup.add(__deck)
return markup
@staticmethod
def __generate_take_card_markup(empty):
if empty:
return None
markup = types.InlineKeyboardMarkup(row_width=2)
__take_card = types.InlineKeyboardButton(text='Взять карту', callback_data='take_card')
__deck = types.InlineKeyboardButton(text='Открыть колоду', switch_inline_query_current_chat='')
markup.add(__take_card, __deck)
return markup
@staticmethod
def __generate_boom_markup():
markup = types.InlineKeyboardMarkup(row_width=1)
__take_card = types.InlineKeyboardButton(text='Взорваться!', callback_data='boom')
markup.add(__take_card)
return markup
def __generate_user_choosing_markup(self, chat_id):
__card_counts = self.__gms_controller.get_no_cards_in_decks(chat_id)
markup = types.InlineKeyboardMarkup(row_width=1)
__users = self.__gms_controller.get_users(chat_id)
for i in range(len(__users)):
user_id = __users[i]
markup.add(types.InlineKeyboardButton(text=self.__user_id_to_user[user_id].first_name + ' (' + str(__card_counts[i]) + ')', callback_data='choose_player ' + str(user_id)))
return markup
@staticmethod
def __generate_card_type_choosing_markup():
markup = types.InlineKeyboardMarkup(row_width=2)
__create_button = lambda __type: types.InlineKeyboardButton(text=convert_card_type_no_name(__type), callback_data='choose_card ' + str(__type))
__type = 2
while __type < 14:
__a = __create_button(__type)
__type += 1
__b = __create_button(__type)
__type += 1
markup.add(__a, __b)
# markup.add(types.InlineKeyboardButton(text='Обезвредь', callback_data='choose_card 2'), types.InlineKeyboardButton(text='Подсмуртри грядущее', callback_data='choose_card 3'))
# markup.add(types.InlineKeyboardButton(text='Неть', callback_data='choose_card 4'), types.InlineKeyboardButton(text='Затасуй', callback_data='choose_card 5'))
# markup.add(types.InlineKeyboardButton(text='Слиняй', callback_data='choose_card 6'), types.InlineKeyboardButton(text='Подлижись', callback_data='choose_card 7'))
# markup.add(types.InlineKeyboardButton(text='Атака', callback_data='choose_card 8'), types.InlineKeyboardButton(text='Кот радугапожиратель', callback_data='choose_card 9'))
# markup.add(types.InlineKeyboardButton(text='Волосатый кот-картошка', callback_data='choose_card 10'), types.InlineKeyboardButton(text='Такокот', callback_data='choose_card 11'))
# markup.add(types.InlineKeyboardButton(text='Арбузный кот', callback_data='choose_card 12'), types.InlineKeyboardButton(text='Бородакот', callback_data='choose_card 13'))
return markup
##########################################################
def __generate_create_message(self, chat_id, default = False):
if default:
return Messages.GAME_CREATED
else:
return Messages.GAME_CREATED + '\n' + \
BotProcessor.__generate_players_count(self.__gms_controller.get_users_count(chat_id))
def __generate_users_list(self, chat_id):
__str = ''
__users = self.__gms_controller.get_users(chat_id)
__index = 1
for user_id in __users:
__str += Messages.P_NEUTRAL + ' ' + str(__index) + '. ' + self.__user_id_to_user[user_id].first_name + '\n'
__index += 1
return __str
def __generate_start_message(self, chat_id, default = False):
if default:
return Messages.GAME_STARTED
else:
return Messages.GAME_STARTED + '\n' + \
BotProcessor.__generate_players_count(self.__gms_controller.get_users_count(chat_id)) + '\n' + \
BotProcessor.__generate_cards_count(self.__gms_controller.get_deck_cards_count(chat_id)) + '\n\n' + \
self.__generate_users_list(chat_id)
def __generate_start_message_entities(self, chat_id, default = False):
print('__generate_start_message_entities')
if default:
return []
__result = []
__prefix_len = len(Messages.GAME_STARTED + '\n' + \
BotProcessor.__generate_players_count(self.__gms_controller.get_users_count(chat_id)) + '\n' + \
BotProcessor.__generate_cards_count(self.__gms_controller.get_deck_cards_count(chat_id)) + '\n\n') + 2
__users = self.__gms_controller.get_users(chat_id)
__d_len = 0
for user_id in __users:
__d_len += len(Messages.P_NEUTRAL) + len(' 1. ') + 1
__user = self.__user_id_to_user[user_id]
__result.append(types.MessageEntity('text_mention', __prefix_len + __d_len, len(__user.first_name), user=__user.__dict__))
__d_len += len(__user.first_name) + 1
return __result
##########################################################
def __create_message_updater(self, chat_id, message_id, default = False):
try:
self.__bot.edit_message_text(self.__generate_create_message(chat_id, default), chat_id, message_id,
reply_markup=BotProcessor.__generate_jl_markup(default))
except Exception:
pass
def __start_message_updater(self, chat_id, message_id, default = False):
try:
print('__start_message_updater processed default = {0}'.format(default))
self.__bot.edit_message_text(self.__generate_start_message(chat_id, default), chat_id, message_id,
reply_markup=BotProcessor.__generate_take_card_markup(default),
entities=self.__generate_start_message_entities(chat_id, default))
print('__start_message updated')
except Exception as e:
print(e)
##########################################################
def __game_created(self):
message_id = self.__bot.send_message(self.__chat_id, self.__generate_create_message(self.__chat_id),
reply_markup=BotProcessor.__generate_jl_markup(False)
).message_id
self.__messages.set(self.__chat_id, message_id, self.__create_message_updater)
def __game_closed(self, **data):
self.__messages.remove(self.__message.chat.id)
if self.__message.chat.id in self.__menus:
del self.__menus[self.__message.chat.id]
for user_id in data['users']:
del self.__user_id_to_user[user_id]
self.__bot.reply_to(self.__message, Messages.GAME_CLOSED)
def __game_started(self):
self.__messages.remove(self.__chat_id)
self.__menus[self.__message.chat.id] = self.__bot.send_message(self.__chat_id, self.__generate_start_message(self.__chat_id),
reply_markup=BotProcessor.__generate_take_card_markup(False),
entities=self.__generate_start_message_entities(self.__chat_id, False))
print('__game_started self.__messages.set processed')
self.__messages.set(self.__chat_id, self.__menus[self.__message.chat.id].message_id, self.__start_message_updater)
def __game_already_started(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.GAME_ALREADY_STARTED)
def __game_already_created(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.GAME_ALREADY_CREATED)
def __player_joined(self):
self.__messages.update(self.__chat_id)
self.__user_id_to_user[self.__u.id] = self.__u
self.__bot.send_message(self.__chat_id, Messages.PLAYER_PREFIX + self.__u.first_name + Messages.PLAYER_JOINED_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
def __player_already_in_game(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.PLAYER_ALREADY_IN_GAME)
def __player_in_another_game(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.PLAYER_IN_ANOTHER_GAME)
def __game_not_created(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.GAME_NOT_CREATED)
def __no_players_not_valid(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.NO_PLAYERS_NOT_VALID)
def __player_not_in_game(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.PLAYER_NOT_IN_GAME)
def __player_leaves(self, **data):
self.__messages.update(data['chat_id'])
del self.__user_id_to_user[self.__user_id]
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__u.first_name + Messages.PLAYER_LEAVES_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
def __card_taken(self):
self.__messages.update(self.__chat_id)
self.__bot.send_message(self.__chat_id, Messages.PLAYER_PREFIX + self.__u.first_name + Messages.PLAYER_TAKE_CARD_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
def __another_player_turn(self, **data):
if self.__message != None:
self.__bot.send_message(self.__chat_id, Messages.ANOTHER_PLAYER_TURN + self.__user_id_to_user[data['user_id']].first_name,
entities=[types.MessageEntity('text_mention', len(Messages.ANOTHER_PLAYER_TURN), len(self.__user_id_to_user[data['user_id']].first_name), user= self.__user_id_to_user[data['user_id']].__dict__)])
def __explosive_kitten(self, **data):
self.__messages.update(self.__chat_id)
self.__bot.send_sticker(self.__chat_id, card_id_to_sticker[data['card_id']], reply_markup=self.__generate_boom_markup())
self.__bot.send_message(self.__chat_id, Messages.PLAYER_PREFIX + self.__user_id_to_user[self.__user_id].first_name + Messages.PLAYER_TOOK_EXPLOSIVE_KITTEN_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
def __explosive_kitten_neutralized(self, **data):
self.__messages.update(self.__chat_id)
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[self.__user_id].first_name + Messages.PLAYER_NEUTRALIZED_EXPLISIVE_KITTEN_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
self.__bot.send_message(data['chat_id'], Messages.PLACE_CARD_TUTORIAL_MINI.format(self.__gms_controller.get_deck_cards_count(data['chat_id']) + 1))
def __place_card_failed(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.PLACE_CARD_TUTORIAL.format(self.__gms_controller.get_deck_cards_count(self.__gms_controller.get_game_id(self.__user_id)) + 1))
def __game_not_started(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.GAME_NOT_STARTED)
def __card_placed(self, **data):
self.__messages.update(self.__chat_id)
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[self.__user_id].first_name + Messages.PLAYER_PLACED_EXPLOSIVE_KITTEN_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
def __player_boom(self, **data):
self.__messages.update(self.__chat_id)
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[self.__user_id].first_name + Messages.PLAYER_BOOM_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
del self.__user_id_to_user[self.__user_id]
def __player_shuffled_deck(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[self.__user_id].first_name + Messages.PLAYER_SHUFFLED_DECK_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
def __player_ran_away(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[self.__user_id].first_name + Messages.PLAYER_RAN_AWAY_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__u.first_name), user=self.__u.__dict__)])
def __next_player_turn(self, **data):
self.__bot.send_message(data['chat_id'], Messages.NEXT_PLAYER_TURN + self.__user_id_to_user[data['user_id']].first_name,
# reply_markup=BotProcessor.__generate_deck_button_markup(False),
entities=[types.MessageEntity('text_mention', len(Messages.NEXT_PLAYER_TURN)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __player_attacked(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_ATTACKED_SUFFIX.format(data['turns_count']),
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __player_see_the_future(self, **data):
try:
for card_id in data['cards']:
self.__bot.send_sticker(data['user_id'], card_id_to_sticker[card_id])
# self.__gms_controller.see_the_future_successful(data['user_id'])
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_SEEN_THE_FUTURE_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
self.__gms_controller.see_the_future_successful(data['user_id'])
except Exception as e:
# self.__gms_controller.undo(data['chat_id'])
self.__bot.send_message(data['chat_id'], Messages.SEE_THE_FUTURE_FAILED)
def __player_canceled_last_action(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_CANCELED_LAST_ACTION_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __favor_player_choosing(self, **data):
self.__bot.send_message(data['chat_id'], Messages.FAVOR_PLAYER_CHOOSING, reply_markup=self.__generate_user_choosing_markup(data['chat_id']))
def __favor_card_choosing(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_MUST_CH0OSE_CARD_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __favor_player_choosed_with_empty_deck(self, **data):
self.__bot.send_message(data['chat_id'], Messages.FAVOR_PLAYER_CHOOSED_WITH_EMPTY_DECK)
def __game_end(self, **data):
self.__messages.remove(data['chat_id'])
if data['chat_id'] in self.__menus:
del self.__menus[data['chat_id']]
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_WON_THE_GAME_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
for user_id in data['users']:
del self.__user_id_to_user[user_id]
def __player_cant_use_this_combination(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.CANT_USE_THIS_COMBINATION)
def __player_choosing_cards_for_comb(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_CHOOSING_CARDS_FOR_COMB_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __special2_done(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.SPECIAL2_DONE_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __player_has_no_card(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX_2 + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_HAS_NO_CARD_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX_2)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __player_choosed(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_CHOOSED + self.__user_id_to_user[data['user_id']].first_name,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_CHOOSED)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __card_type_choosing(self, **data):
self.__bot.send_message(data['chat_id'], Messages.CHOOSE_CARD_TYPE, reply_markup=BotProcessor.__generate_card_type_choosing_markup())
def __special3_failed(self, **data):
self.__bot.send_message(data['chat_id'], Messages.SPECIAL3_FAILED_WITH_THIS_TYPE)
def __special3_done(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.SPECIAL3_DONE_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __turn_canceled(self, **data):
self.__bot.send_message(data['chat_id'], Messages.TURN_CANCELED)
def __discard_is_empty(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.DISCARD_IS_EMPTY)
def __undo_special_successfull(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.CANCELED_SPECIAL_COMB_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __undo_special_failed(self):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.UNDO_SPECIAL_FAILED)
def __card_type_choosed(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.CARD_TYPE_CHOOSED_SUFFIX + convert_card_type_no_name(data['card_type']),
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __player_taking_card_from_discard(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_TAKING_CARD_FROM_DISCARD_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __special5_waiting(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_WAITING_TO_TAKE_CARD_FROM_DISCARD_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __player_took_card_from_discard(self, **data):
self.__bot.send_message(data['chat_id'], Messages.PLAYER_PREFIX + self.__user_id_to_user[data['user_id']].first_name + Messages.PLAYER_TOOK_CARD_FROM_DISCARD_SUFFIX,
entities=[types.MessageEntity('text_mention', len(Messages.PLAYER_PREFIX)+1, len(self.__user_id_to_user[data['user_id']].first_name), user=self.__user_id_to_user[data['user_id']].__dict__)])
def __print_deck_size(self, **data):
if self.__message != None:
self.__bot.reply_to(self.__message, Messages.DECK_SIZE + str(data['deck_size']))
def __yourself_cannot_be_choosen(self, **data):
self.__bot.send_message(data['chat_id'], Messages.YOURSELF_CAN_NOT_BE_CHOOSEN)
##########################################################
def __init_notif_processor(self):
self.__notif_processor.bind(Notifications.GAME_CREATED, self.__game_created)
self.__notif_processor.bind(Notifications.GAME_CLOSED, self.__game_closed)
self.__notif_processor.bind(Notifications.GAME_ALREADY_CREATED, self.__game_already_created)
self.__notif_processor.bind(Notifications.GAME_NOT_CREATED, self.__game_not_created)
self.__notif_processor.bind(Notifications.PLAYER_JOINED, self.__player_joined)
self.__notif_processor.bind(Notifications.NO_PLAYERS_NOT_VALID, self.__no_players_not_valid)
self.__notif_processor.bind(Notifications.PLAYER_ALREADY_IN_GAME, self.__player_already_in_game)
self.__notif_processor.bind(Notifications.PLAYER_IN_ANOTHER_GAME, self.__player_in_another_game)
self.__notif_processor.bind(Notifications.PLAYER_NOT_IN_GAME, self.__player_not_in_game)
self.__notif_processor.bind(Notifications.PLAYER_LEAVES, self.__player_leaves)
self.__notif_processor.bind(Notifications.GAME_STARTED, self.__game_started)
self.__notif_processor.bind(Notifications.GAME_ALREADY_STARTED, self.__game_already_started)
self.__notif_processor.bind(Notifications.CARD_TAKEN, self.__card_taken)
self.__notif_processor.bind(Notifications.ANOTHER_PLAYER_TURN, self.__another_player_turn)
self.__notif_processor.bind(Notifications.EXPLOSIVE_KITTEN, self.__explosive_kitten)
self.__notif_processor.bind(Notifications.EXPLOSIVE_KITTEN_NEUTRALIZED, self.__explosive_kitten_neutralized)
self.__notif_processor.bind(Notifications.PLACE_CARD_FAILED, self.__place_card_failed)
self.__notif_processor.bind(Notifications.GAME_NOT_STARTED, self.__game_not_started)
self.__notif_processor.bind(Notifications.EXPLOSIVE_KITTEN_PLACED, self.__card_placed)
self.__notif_processor.bind(Notifications.PLAYER_BOOM, self.__player_boom)
self.__notif_processor.bind(Notifications.PLAYER_SHUFFLED_DECK, self.__player_shuffled_deck)
self.__notif_processor.bind(Notifications.PLAYER_RAN_AWAY, self.__player_ran_away)
self.__notif_processor.bind(Notifications.NEXT_PLAYER_TURN, self.__next_player_turn)
self.__notif_processor.bind(Notifications.PLAYER_ATTACKED, self.__player_attacked)
self.__notif_processor.bind(Notifications.SEE_THE_FUTURE, self.__player_see_the_future)
self.__notif_processor.bind(Notifications.PLAYER_CANCELED_LAST_ACTION, self.__player_canceled_last_action)
self.__notif_processor.bind(Notifications.FAVOR_PLAYER_CHOOSING, self.__favor_player_choosing)
self.__notif_processor.bind(Notifications.FAVOR_CARD_CHOOSING, self.__favor_card_choosing)
self.__notif_processor.bind(Notifications.FAVOR_PLAYER_CHOOSED_WITH_EMPTY_DECK, self.__favor_player_choosed_with_empty_deck)
self.__notif_processor.bind(Notifications.GAME_END, self.__game_end)
self.__notif_processor.bind(Notifications.PLAYER_CANT_USE_THIS_COMBINATION, self.__player_cant_use_this_combination)
self.__notif_processor.bind(Notifications.PLAYER_CHOOSING_CARDS_FOR_COMB, self.__player_choosing_cards_for_comb)
self.__notif_processor.bind(Notifications.SPECIAL2_DONE, self.__special2_done)
self.__notif_processor.bind(Notifications.PLAYER_HAS_NO_CARD, self.__player_has_no_card)
self.__notif_processor.bind(Notifications.PLAYER_CHOOSED, self.__player_choosed)
self.__notif_processor.bind(Notifications.CARD_TYPE_CHOOSING, self.__card_type_choosing)
self.__notif_processor.bind(Notifications.SPECIAL3_FAILED_WITH_THIS_TYPE, self.__special3_failed)
self.__notif_processor.bind(Notifications.SPECIAL3_DONE, self.__special3_done)
self.__notif_processor.bind(Notifications.TURN_CANCELED, self.__turn_canceled)
self.__notif_processor.bind(Notifications.DISCARD_IS_EMPTY, self.__discard_is_empty)
self.__notif_processor.bind(Notifications.UNDO_SPECIAL_SUCCESSFULL, self.__undo_special_successfull)
self.__notif_processor.bind(Notifications.UNDO_SPECIAL_FAILED, self.__undo_special_failed)
self.__notif_processor.bind(Notifications.CARD_TYPE_CHOOSED, self.__card_type_choosed)
self.__notif_processor.bind(Notifications.PLAYER_TAKING_CARD_FROM_DISCARD, self.__player_taking_card_from_discard)
self.__notif_processor.bind(Notifications.SPECIAL5_WAINTING, self.__special5_waiting)
self.__notif_processor.bind(Notifications.PLAYER_TOOK_CARD_FROM_DISCARD, self.__player_took_card_from_discard)
self.__notif_processor.bind(Notifications.PRINT_DECK_SIZE, self.__print_deck_size)
self.__notif_processor.bind(Notifications.YOURSELF_CAN_NOT_BE_CHOOSEN, self.__yourself_cannot_be_choosen)
# self.__notif_processor.bind()
def __init_command_reactions(self):
self.__command_reactions[Commands.CREATE] = self.__create
self.__command_reactions[Commands.JOIN] = self.__join
self.__command_reactions[Commands.LEAVE] = self.__leave
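# A minimal sketch of the bind/dispatch pattern that __init_notif_processor
# relies on above. The real NotificationsProcessor is imported from the
# project's notifications_processor module and is not shown in this excerpt;
# this hypothetical stand-in only illustrates the shape implied by its usage.


class SimpleNotificationsProcessor:
    def __init__(self):
        self._handlers = {}

    def bind(self, notification, handler):
        # One handler per notification type, exactly as bound above.
        self._handlers[notification] = handler

    def notify(self, notification, **data):
        handler = self._handlers.get(notification)
        if handler is None:
            return None
        # Handlers above either take no arguments or accept keyword data
        # (e.g. chat_id / user_id), so forward **data only when present.
        return handler(**data) if data else handler()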
# File: heat/tests/test_translation_rule.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception
from heat.engine.cfn import functions as cfn_funcs
from heat.engine import function
from heat.engine.hot import functions as hot_funcs
from heat.engine import parameters
from heat.engine import properties
from heat.engine import translation
from heat.tests import common
class TestTranslationRule(common.HeatTestCase):
def test_translation_rule(self):
for r in translation.TranslationRule.RULE_KEYS:
props = properties.Properties({}, {})
rule = translation.TranslationRule(
props,
r,
['any'],
['value'] if r == 'Add' else 'value',
'value_name' if r == 'Replace' else None,
'client_plugin' if r == 'Resolve' else None,
'finder' if r == 'Resolve' else None)
self.assertEqual(rule.properties, props)
self.assertEqual(rule.rule, r)
if r == 'Add':
self.assertEqual(['value'], rule.value)
else:
self.assertEqual('value', rule.value)
if r == 'Replace':
self.assertEqual('value_name', rule.value_name)
else:
self.assertIsNone(rule.value_name)
def test_invalid_translation_rule(self):
props = properties.Properties({}, {})
exc = self.assertRaises(ValueError,
translation.TranslationRule,
'proppy', mock.ANY,
mock.ANY)
self.assertEqual('Properties must be Properties type. '
'Found %s.' % str, six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
'EatTheCookie',
mock.ANY,
mock.ANY)
self.assertEqual('There is no rule EatTheCookie. List of allowed '
'rules is: Add, Replace, Delete, Resolve.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
'networks.network',
'value')
self.assertEqual('source_path should be a list with path instead of '
'%s.' % str, six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
[],
mock.ANY)
self.assertEqual('source_path must be non-empty list with path.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
['any'],
mock.ANY,
'value_name')
self.assertEqual('Use value_name only for replacing list elements.',
six.text_type(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
props,
translation.TranslationRule.ADD,
['any'],
'value')
self.assertEqual('value must be list type when rule is Add.',
six.text_type(exc))
def test_add_rule_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': [
{'red': 'blue'}
],
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[{'red': props.get('bar')}])
rule.execute_rule()
self.assertIn({'red': 'dak'}, props.get('far'))
def test_add_rule_dont_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[{'red': props.get('bar')}])
rule.execute_rule()
self.assertEqual([{'red': 'dak'}], props.get('far'))
def test_add_rule_invalid(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': 'tran',
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.ADD,
['far'],
[props.get('bar')])
exc = self.assertRaises(ValueError, rule.execute_rule)
self.assertEqual('Add rule must be used only for lists.',
six.text_type(exc))
def test_replace_rule_map_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': {'red': 'tran'},
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual({'red': 'dak'}, props.get('far'))
def test_replace_rule_map_dont_exist(self):
schema = {
'far': properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual({'red': 'dak'}, props.get('far'))
def test_replace_rule_list_different(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
),
'bar': properties.Schema(
properties.Schema.STRING
)}
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
'bar': 'dak'
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
props.get('bar'))
rule.execute_rule()
self.assertEqual([{'red': 'dak'}, {'red': 'dak'}], props.get('far'))
def test_replace_rule_list_same(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
),
'blue': properties.Schema(
properties.Schema.STRING
)
}
)
)}
data = {
'far': [{'blue': 'white'},
{'red': 'roses'}]
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far', 'red'],
None,
'blue')
rule.execute_rule()
self.assertEqual([{'red': 'white', 'blue': None},
{'blue': None, 'red': 'roses'}],
props.get('far'))
def test_replace_rule_str(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one', 'bar': 'two'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
props.get('far'))
rule.execute_rule()
self.assertEqual('one', props.get('bar'))
self.assertEqual('one', props.get('far'))
def test_replace_rule_str_value_path_error(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one', 'bar': 'two'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
value_path=['far'])
ex = self.assertRaises(ValueError, rule.execute_rule)
self.assertEqual('Cannot use bar and far at the same time.',
six.text_type(ex))
def test_replace_rule_str_value_path(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
value_path=['far'])
rule.execute_rule()
self.assertEqual('one', props.get('bar'))
self.assertIsNone(props.get('far'))
def test_replace_rule_str_invalid(self):
schema = {
'far': properties.Schema(properties.Schema.STRING),
'bar': properties.Schema(properties.Schema.INTEGER)
}
data = {'far': 'one', 'bar': 2}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['bar'],
props.get('far'))
rule.execute_rule()
exc = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual("Property error: bar: Value 'one' is not an integer",
six.text_type(exc))
def test_delete_rule_list(self):
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
)}
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.DELETE,
['far', 'red'])
rule.execute_rule()
self.assertEqual([{'red': None}, {'red': None}], props.get('far'))
def test_delete_rule_other(self):
schema = {
'far': properties.Schema(properties.Schema.STRING)
}
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.DELETE,
['far'])
rule.execute_rule()
self.assertIsNone(props.get('far'))
def _test_resolve_rule(self, is_list=False):
class FakeClientPlugin(object):
def find_name_id(self, entity=None,
src_value='far'):
if entity == 'rose':
return 'pink'
return 'yellow'
if is_list:
schema = {
'far': properties.Schema(
properties.Schema.LIST,
schema=properties.Schema(
properties.Schema.MAP,
schema={
'red': properties.Schema(
properties.Schema.STRING
)
}
)
)}
else:
schema = {
'far': properties.Schema(properties.Schema.STRING)
}
return FakeClientPlugin(), schema
def test_resolve_rule_list_populated(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
data = {
'far': [{'red': 'blue'},
{'red': 'roses'}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([{'red': 'yellow'}, {'red': 'yellow'}],
props.get('far'))
def test_resolve_rule_list_with_function(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
join_func = cfn_funcs.Join(None,
'Fn::Join', ['.', ['bar', 'baz']])
data = {
'far': [{'red': 'blue'},
{'red': join_func}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([{'red': 'yellow'}, {'red': 'yellow'}],
props.get('far'))
def test_resolve_rule_list_with_ref(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
class rsrc(object):
action = INIT = "INIT"
class DummyStack(dict):
pass
stack = DummyStack(another_res=rsrc())
ref = cfn_funcs.ResourceRef(stack, 'get_resource',
'another_res')
data = {
'far': [{'red': ref}],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_list_empty(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
data = {
'far': [],
}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far', 'red'],
client_plugin=client_plugin,
finder='find_name_id'
)
rule.execute_rule()
self.assertEqual([], props.get('far'))
def test_resolve_rule_other(self):
client_plugin, schema = self._test_resolve_rule()
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual('yellow', props.get('far'))
def test_resolve_rule_other_with_ref(self):
client_plugin, schema = self._test_resolve_rule()
class rsrc(object):
action = INIT = "INIT"
class DummyStack(dict):
pass
stack = DummyStack(another_res=rsrc())
ref = cfn_funcs.ResourceRef(stack, 'get_resource',
'another_res')
data = {'far': ref}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_other_with_function(self):
client_plugin, schema = self._test_resolve_rule()
join_func = cfn_funcs.Join(None,
'Fn::Join', ['.', ['bar', 'baz']])
data = {'far': join_func}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule()
self.assertEqual(data, props.data)
def test_resolve_rule_other_with_get_attr(self):
client_plugin, schema = self._test_resolve_rule()
class DummyStack(dict):
pass
class rsrc(object):
pass
stack = DummyStack(another_res=rsrc())
attr_func = cfn_funcs.GetAtt(stack, 'Fn::GetAtt',
['another_res', 'name'])
data = {'far': attr_func}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id')
rule.execute_rule(client_resolve=False)
self.assertEqual(data, props.data)
mock_getatt = self.patchobject(attr_func, 'result',
return_value='rose')
rule.execute_rule()
self.assertEqual('pink', props.get('far'))
self.assertEqual(1, mock_getatt.call_count)
def test_resolve_rule_other_with_entity(self):
client_plugin, schema = self._test_resolve_rule()
data = {'far': 'one'}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
['far'],
client_plugin=client_plugin,
finder='find_name_id',
entity='rose')
rule.execute_rule()
self.assertEqual('pink', props.get('far'))
def test_property_json_param_correct_translation(self):
"""Test case when property with sub-schema takes json param."""
schema = {
'far': properties.Schema(properties.Schema.MAP,
schema={
'bar': properties.Schema(
properties.Schema.STRING,
),
'dar': properties.Schema(
properties.Schema.STRING
)
})
}
class DummyStack(dict):
@property
def parameters(self):
return mock.Mock()
param = hot_funcs.GetParam(DummyStack(json_far='json_far'),
'get_param',
'json_far')
param.parameters = {
'json_far': parameters.JsonParam(
'json_far',
{'Type': 'Json'},
'{"dar": "rad"}').value()}
data = {'far': param}
props = properties.Properties(schema, data)
rule = translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
['far',
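# A condensed usage sketch of the API exercised by the tests above: build a
# Properties object, attach a REPLACE TranslationRule, and execute it. It
# mirrors test_replace_rule_str and uses the same heat.engine imports that
# appear at the top of this test module.
from heat.engine import properties as _props_mod
from heat.engine import translation as _trans_mod


def _replace_rule_usage_sketch():
    schema = {
        'far': _props_mod.Schema(_props_mod.Schema.STRING),
        'bar': _props_mod.Schema(_props_mod.Schema.STRING),
    }
    props = _props_mod.Properties(schema, {'far': 'one', 'bar': 'two'})
    rule = _trans_mod.TranslationRule(
        props,
        _trans_mod.TranslationRule.REPLACE,
        ['bar'],
        props.get('far'))
    rule.execute_rule()
    # After execution both 'bar' and 'far' resolve to 'one'.
    return props.get('bar'), props.get('far')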
= mask_ds.GetRasterBand(1)
src_ds = gdal.Open(str(line_params.params.in_df.path))
cell_areas = _get_cell_areas(
line_params.y, line_params.lat, line_params.win_ysize,
line_params.image_info
)
results = []
for x in range(
0, line_params.image_info.x_size, line_params.image_info.x_block_size
):
if x + line_params.image_info.x_block_size < line_params.image_info.x_size:
win_xsize = line_params.image_info.x_block_size
else:
win_xsize = line_params.image_info.x_size - x
logger.debug('image_info: %s', line_params.image_info)
logger.debug('x %s, win_xsize %s', x, win_xsize)
src_array = src_ds.ReadAsArray(
xoff=x,
yoff=line_params.y,
xsize=win_xsize,
ysize=line_params.win_ysize
)
mask_array = mask_band.ReadAsArray(
xoff=x,
yoff=line_params.y,
win_xsize=win_xsize,
win_ysize=line_params.win_ysize
)
mask_array = mask_array == MASK_VALUE
result = _process_block(
line_params.params, src_array, mask_array, x, line_params.y,
cell_areas
)
results.append(result)
return results
def _get_n_pop_band_for_type(dfs, pop_type):
n_bands = 0
for df in dfs:
n_bands += len(
df.indices_for_name(
POPULATION_BAND_NAME, field='type', field_filter=pop_type
)
)
return n_bands
def _get_n_spi_bands(dfs):
n_bands = 0
for df in dfs:
n_bands += len(df.indices_for_name(SPI_BAND_NAME))
return n_bands
def _have_pop_by_sex(in_dfs):
n_spi_bands = _get_n_spi_bands(in_dfs)
n_total_pop_bands = _get_n_pop_band_for_type(in_dfs, 'total')
n_female_pop_bands = _get_n_pop_band_for_type(in_dfs, 'female')
n_male_pop_bands = _get_n_pop_band_for_type(in_dfs, 'male')
logger.debug(
'n_total_pop_bands %s, n_female_pop_bands %s, n_male_pop_bands %s',
n_total_pop_bands, n_female_pop_bands, n_male_pop_bands
)
assert (
n_spi_bands == n_total_pop_bands
or n_spi_bands * 2 == (n_male_pop_bands + n_female_pop_bands)
)
if n_male_pop_bands >= 1:
return True
else:
return False
class DroughtSummary:
def __init__(self, params: DroughtSummaryParams):
self.params = params
self.image_info = util.get_image_info(self.params.in_df.path)
def is_killed(self):
return False
def emit_progress(self, *args):
'''Reimplement to display progress messages'''
util.log_progress(
*args,
message=f'Processing drought summary for {self.params.in_df.path}'
)
def get_line_params(self):
'''Make a list of parameters to use in the _process_line function'''
# Set initial lat to the top left corner latitude
src_ds = gdal.Open(str(self.params.in_df.path))
src_gt = src_ds.GetGeoTransform()
lat = src_gt[3]
logger.debug(
'getting line params for image with xsize '
'%s, and ysize %s', src_ds.RasterXSize, src_ds.RasterYSize
)
line_params = []
for y in range(
0, self.image_info.y_size, self.image_info.y_block_size
):
if y + self.image_info.y_block_size < self.image_info.y_size:
win_ysize = self.image_info.y_block_size
else:
win_ysize = self.image_info.y_size - y
line_params.append(
LineParams(self.params, self.image_info, y, win_ysize, lat)
)
lat += self.image_info.pixel_height * win_ysize
return line_params
def process_lines(self, line_params_list):
out_ds = self._get_out_ds()
out = []
for n, line_params in enumerate(line_params_list):
self.emit_progress(n / len(line_params_list))
results = _process_line(line_params)
for result in results:
out.append(result[0])
for key, value in result[1].items():
out_ds.GetRasterBand(key).WriteArray(**value)
out = _accumulate_drought_summary_tables(out)
return out
def _get_out_ds(self):
n_out_bands = int(
2 * math.ceil(
len(self.params.in_df.indices_for_name(SPI_BAND_NAME)) /
self.params.drought_period
)
)
if _have_pop_by_sex([self.params.in_df]):
# If we have population disaggregated by sex, then the total
# population at max drought layer is written for each period except
# for the last, which also includes male/female totals - so need
# two more out bands
n_out_bands += 2
out_ds = util.setup_output_image(
self.params.in_df.path, self.params.out_file, n_out_bands,
self.image_info
)
return out_ds
def _get_population_band_instance(population_type, year_initial, year_final):
return Band(
name=POP_AT_SPI_MIN_OVER_PERIOD_BAND_NAME,
no_data_value=NODATA_VALUE,
metadata={
'year_initial': year_initial,
'year_final': year_final,
'type': population_type
},
activated=True
)
def summarise_drought_vulnerability(
drought_job: Job,
aoi: AOI,
job_output_path: Path,
n_cpus: int = multiprocessing.cpu_count() - 1
) -> Job:
logger.debug('at top of summarise_drought_vulnerability')
params = drought_job.params
drought_period = 4
spi_dfs = _prepare_dfs(
params['layer_spi_path'], params['layer_spi_bands'],
params['layer_spi_band_indices']
)
population_dfs = _prepare_dfs(
params['layer_population_path'], params['layer_population_bands'],
params['layer_population_band_indices']
)
jrc_df = _prepare_dfs(
params['layer_jrc_path'], [params['layer_jrc_band']],
[params['layer_jrc_band_index']]
)
if params.get('layer_water_path') is not None:
# Water layers are optional - if not provided then water won't be
# masked
water_df = _prepare_dfs(
params['layer_water_path'], [params['layer_water_band']],
[params['layer_water_band_index']]
)
else:
water_df = []
summary_table, out_path = _compute_drought_summary_table(
aoi=aoi,
compute_bbs_from=params['layer_spi_path'],
output_job_path=job_output_path.parent /
f"{job_output_path.stem}.json",
in_dfs=spi_dfs + population_dfs + jrc_df + water_df,
drought_period=drought_period,
n_cpus=n_cpus
)
out_bands = []
logger.info(
f"Processing for years {params['layer_spi_years'][0]} - "
f"{int(params['layer_spi_years'][-1])}"
)
year_initials = [
*range(
int(params['layer_spi_years'][0]),
int(params['layer_spi_years'][-1]), drought_period
)
]
for period_number, year_initial in enumerate(year_initials):
if (year_initial + drought_period - 1) > params['layer_spi_years'][-1]:
year_final = params['layer_spi_years'][-1]
else:
year_final = year_initial + drought_period - 1
out_bands.append(
Band(
name=SPI_MIN_OVER_PERIOD_BAND_NAME,
no_data_value=NODATA_VALUE,
metadata={
'year_initial': year_initial,
'year_final': year_final,
'lag': int(params['layer_spi_lag'])
},
activated=True
)
)
out_bands.append(
_get_population_band_instance('total', year_initial, year_final)
)
if _have_pop_by_sex(spi_dfs + population_dfs
) and period_number == (len(year_initials) - 1):
out_bands.append(
_get_population_band_instance(
'female', year_initial, year_final
)
)
out_bands.append(
_get_population_band_instance(
'male', year_initial, year_final
)
)
out_df = DataFile(out_path.name, out_bands)
# Also save bands to a key file for ease of use in PRAIS
key_json = job_output_path.parent / f"{job_output_path.stem}_band_key.json"
with open(key_json, 'w') as f:
json.dump(DataFile.Schema().dump(out_df), f, indent=4)
summary_json_output_path = job_output_path.parent / f"{job_output_path.stem}_summary.json"
report_json = save_reporting_json(
summary_json_output_path, summary_table, drought_job.params,
drought_job.task_name, aoi
)
summary_table_output_path = job_output_path.parent / f"{job_output_path.stem}_summary.xlsx"
save_summary_table_excel(
summary_table_output_path,
summary_table,
years=[int(y) for y in params['layer_spi_years']]
)
drought_job.results = RasterResults(
name='drought_vulnerability_summary',
uri=URI(uri=out_path, type='local'),
rasters={
DataType.INT16.value:
Raster(
uri=URI(uri=out_path, type='local'),
bands=out_df.bands,
datatype=DataType.INT16,
filetype=RasterFileType.COG
)
},
data={'report': report_json}
)
drought_job.end_date = dt.datetime.now(dt.timezone.utc)
drought_job.progress = 100
return drought_job
def _prepare_dfs(path, band_str_list, band_indices) -> List[DataFile]:
dfs = []
for band_str, band_index in zip(band_str_list, band_indices):
band = Band(**band_str)
dfs.append(
DataFile(path=util.save_vrt(path, band_index), bands=[band])
)
return dfs
def _aoi_process_multiprocess(inputs, n_cpus):
with multiprocessing.Pool(n_cpus) as p:
n = 0
results = []
for output in p.imap_unordered(_summarize_tile, inputs):
util.log_progress(
n / len(inputs),
message='Processing drought summaries overall progress'
)
error_message = output[1]
if error_message is not None:
p.terminate()
break
results.append(output[0])
n += 1
return results
def _aoi_process_sequential(inputs):
results = []
n = 0
for item in inputs:
output = _summarize_tile(item)
util.log_progress(
n / len(inputs),
message='Processing drought summaries overall progress'
)
error_message = output[1]
if error_message is not None:
break
results.append(output[0])
n += 1
return results
def _summarize_over_aoi(
wkt_aoi,
pixel_aligned_bbox,
in_dfs: List[DataFile],
output_tif_path: Path,
mask_worker_process_name,
drought_worker_process_name,
drought_period: int,
n_cpus: int,
translate_worker_function: Callable = None,
translate_worker_params: dict = None,
mask_worker_function: Callable = None,
mask_worker_params: dict = None,
drought_worker_function: Callable = None,
drought_worker_params: dict = None
) -> Tuple[Optional[SummaryTableDrought], List[Path], str]:
# Combine all raster into a VRT and crop to the AOI
indic_vrt = tempfile.NamedTemporaryFile(
suffix='_drought_indicators.vrt', delete=False
).name
indic_vrt = Path(indic_vrt)
logger.info(u'Saving indicator VRT to {}'.format(indic_vrt))
# The plus one is because band numbers start at 1, not zero
gdal.BuildVRT(
str(indic_vrt), [item.path for item in in_dfs],
outputBounds=pixel_aligned_bbox,
resolution='highest',
resampleAlg=gdal.GRA_NearestNeighbour,
separate=True
)
indic_reproj = tempfile.NamedTemporaryFile(
suffix='_drought_indicators_reproj_tiles.tif', delete=False
).name
indic_reproj = Path(indic_reproj)
logger.info(f'Reprojecting inputs and saving to {indic_reproj}')
error_message = ""
if translate_worker_function:
tiles = translate_worker_function(
indic_vrt, str(indic_reproj), **translate_worker_params
)
else:
translate_worker = workers.CutTiles(
indic_vrt, n_cpus, indic_reproj, gdal.GDT_Int32
)
tiles = translate_worker.work()
logger.debug('Tiles are %s', tiles)
if tiles:
out_files = [
output_tif_path.parent / (output_tif_path.stem + f'_{n}.tif')
for n in range(len(tiles))
]
inputs = [
SummarizeTileInputs(
tile=tile,
out_file=out_file,
aoi=wkt_aoi,
drought_period=drought_period,
in_dfs=in_dfs,
mask_worker_function=mask_worker_function,
mask_worker_params=mask_worker_params,
drought_worker_function=drought_worker_function,
drought_worker_params=drought_worker_params
) for tile, out_file in zip(tiles, out_files)
]
if n_cpus > 1:
results = _aoi_process_multiprocess(inputs, n_cpus)
else:
results = _aoi_process_sequential(inputs)
results = _accumulate_drought_summary_tables(results)
else:
error_message = "Error reprojecting layers."
results = None
out_files = []
return results, out_files, error_message
@dataclasses.dataclass()
class SummarizeTileInputs:
tile: Path
out_file: Path
aoi: str
drought_period: int
in_dfs: List[DataFile]
mask_worker_function: Callable = None
mask_worker_params: dict = None
drought_worker_function: Callable = None
drought_worker_params: dict = None
def _summarize_tile(tile_input):
logger.info('Processing tile %s', tile_input.tile)
# Compute a mask layer that will be used in the tabulation code to
# mask out areas outside of the AOI. Do this instead of using
# gdal.Clip to save having to clip and rewrite all of the layers in
# the VRT
mask_tif = tempfile.NamedTemporaryFile(
suffix='_drought_mask.tif', delete=False
).name
logger.info(f'Saving mask to {mask_tif}')
geojson = util.wkt_geom_to_geojson_file_string(tile_input.aoi)
error_message = None
if tile_input.mask_worker_function:
mask_result = tile_input.mask_worker_function(
mask_tif, geojson, str(tile_input.tile),
**tile_input.mask_worker_params
)
else:
mask_worker = workers.Mask(mask_tif, geojson, str(tile_input.tile))
mask_result = mask_worker.work()
if mask_result:
# Combine all in_dfs together and update path to refer to indicator
# VRT
in_df = DataFile(
tile_input.tile, [b for d in tile_input.in_dfs for b in d.bands]
)
params = DroughtSummaryParams(
in_df=in_df,
out_file=str(tile_input.out_file),
mask_file=mask_tif,
drought_period=tile_input.drought_period
)
logger.info(
'Calculating summary table and saving '
f'rasters to {tile_input.out_file}'
)
if tile_input.drought_worker_function:
result = tile_input.drought_worker_function(
params, **tile_input.drought_worker_params
)
else:
summarizer = DroughtSummary(params)
result = summarizer.process_lines(summarizer.get_line_params())
if not result:
if result.is_killed():
error_message = (
"Cancelled calculation of summary "
f"table for {tile_input.tile}."
)
else:
error_message = (
f"Error calculating summary table for {tile_input.tile}."
)
result = None
else:
result.cast_to_cpython()
else:
error_message = f"Error creating mask for tile {tile_input.tile}."
result = None
return result, error_message
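# A minimal sketch (an assumption, not the project's workers.Mask
# implementation) of the masking approach described in _summarize_tile above:
# burn the AOI geometry into a raster aligned with the tile so the tabulation
# code can skip out-of-AOI pixels instead of clipping every input layer.
# The burn value and helper name are illustrative only.
from osgeo import gdal  # already a dependency of this module


def _rasterize_aoi_mask_sketch(geojson_path, like_tif, out_tif, burn_value=1):
    like_ds = gdal.Open(str(like_tif))
    gt = like_ds.GetGeoTransform()
    xmin = gt[0]
    ymax = gt[3]
    xmax = xmin + gt[1] * like_ds.RasterXSize
    ymin = ymax + gt[5] * like_ds.RasterYSize
    gdal.Rasterize(
        str(out_tif),
        str(geojson_path),
        outputBounds=[xmin, ymin, xmax, ymax],
        xRes=gt[1],
        yRes=abs(gt[5]),
        burnValues=[burn_value],
        initValues=[0],
        outputType=gdal.GDT_Byte,
    )
    return out_tif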
def _compute_drought_summary_table(
aoi, compute_bbs_from, in_dfs, output_job_path: Path, drought_period: int,
n_cpus: int
) -> Tuple[SummaryTableDrought, Path, Path]:
"""Computes summary table and the output tif file(s)"""
wkt_aois = aoi.meridian_split(as_extent=False, out_format='wkt')
bbs = aoi.get_aligned_output_bounds(compute_bbs_from)
assert len(wkt_aois) == len(bbs)
output_name_pattern = {
1: f"{output_job_path.stem}" + ".tif",
2: f"{output_job_path.stem}" + "_{index}.tif"
}[len(wkt_aois)]
mask_name_fragment = {
1: "Generating mask",
2: "Generating mask (part {index} of 2)",
}[len(wkt_aois)]
drought_name_fragment = {
1: "Calculating summary table",
2: "Calculating summary table (part {index} of 2)",
}[len(wkt_aois)]
    summary_tables =
from parsimonious.grammar import Grammar
# https://docs.python.org/3/reference/grammar.html
# 3.10.3 Documentation » The Python Language Reference » 10. Full Grammar specification
YiPGrammar = Grammar("""
# PEG grammar for Yi (Python) syntax
file = statements? # ENDMARKER
interactive = statement_newline
eval = expressions NEWLINE* # ENDMARKER
# func_type = "(" (type_expressions)? ")" "->" expression NEWLINE* # ENDMARKER
fstring = star_expressions
# type_expressions allow */** but ignore them
type_expressions =
((expression ("," expression)*) "," "*" expression "," "**" expression)
/ ((expression ("," expression)*) "," "*" expression)
/ ((expression ("," expression)*) "," "**" expression)
/ ("*" expression "," "**" expression)
/ ("*" expression)
/ ("**" expression)
/ (expression ("," expression)*)
statements = statement+
statement = compound_stmt / simple_stmts
statement_newline =
(compound_stmt NEWLINE)
/ simple_stmts
/ NEWLINE
# / ENDMARKER
simple_stmts =
(simple_stmt !";" NEWLINE) # Not needed, there for speedup
/ ((simple_stmt (";" simple_stmt)*) (";")? NEWLINE)
    # NOTE: assignment MUST precede expression, else parsing a simple assignment
# will throw a SyntaxError.
simple_stmt =
assignment
/ star_expressions
/ return_stmt
/ import_stmt
/ raise_stmt
/ "pass"
/ del_stmt
/ yield_stmt
/ assert_stmt
/ "break"
/ "continue"
/ global_stmt
/ nonlocal_stmt
compound_stmt =
function_def
/ if_stmt
/ class_def
/ with_stmt
/ for_stmt
/ try_stmt
/ while_stmt
/ match_stmt
# NOTE: annotated_rhs may start with "yield"; yield_expr must start with "yield"
assignment =
(NAME ":" expression ("=" annotated_rhs )?)
/ ((("(" single_target ")")
/ single_subscript_attribute_target) ":" expression ("=" annotated_rhs )?)
/ ((star_targets "=" )+ (yield_expr / star_expressions) !"=" (TYPE_COMMENT)?)
/ (single_target augassign (yield_expr / star_expressions))
augassign =
"+="
/ "-="
/ "*="
/ "@="
/ "/="
/ "%="
/ "&="
/ "|="
/ "^="
/ "<<="
/ ">>="
/ "**="
/ "//="
global_stmt = "global" (NAME ("," NAME)*)
nonlocal_stmt = "nonlocal" (NAME ("," NAME)*)
yield_stmt = yield_expr
assert_stmt = ("assert" expression ("," expression )?)
del_stmt =
("del" del_targets &(";" / NEWLINE))
import_stmt = import_name / import_from
import_name = ("import" dotted_as_names)
    # note below: the ("." | "...") is necessary because "..." is tokenized as ELLIPSIS
import_from =
("from" ("..." / ".")* dotted_name "import" import_from_targets)
/ ("from" ("..." / ".")+ "import" import_from_targets)
import_from_targets =
("(" import_from_as_names (",")? ")")
/ (import_from_as_names !",")
/ "*"
import_from_as_names =
(import_from_as_name ("," import_from_as_name)*)
import_from_as_name =
NAME ("as" NAME )?
dotted_as_names =
(dotted_as_name ("," dotted_as_name)*)
dotted_as_name =
(dotted_name ("as" NAME )?)
dotted_name =
(NAME ("." NAME)+)
/ NAME
if_stmt =
("if" named_expression ":" block elif_stmt)
/ ("if" named_expression ":" block (else_block)?)
elif_stmt =
("elif" named_expression ":" block elif_stmt)
/ ("elif" named_expression ":" block (else_block)?)
else_block =
("else" ":" block)
while_stmt =
("while" named_expression ":" block (else_block)?)
for_stmt =
("for" star_targets "in" star_expressions ":" (TYPE_COMMENT)? block (else_block)?)
/ (ASYNC "for" star_targets "in" star_expressions ":" (TYPE_COMMENT)? block (else_block)?)
with_stmt =
("with" "(" (with_item ("," with_item)*) ","? ")" ":" block)
/ ("with" (with_item ("," with_item)*) ":" (TYPE_COMMENT)? block)
/ (ASYNC "with" "(" (with_item ("," with_item)*) ","? ")" ":" block)
/ (ASYNC "with" (with_item ("," with_item)*) ":" (TYPE_COMMENT)? block)
with_item =
(expression "as" star_target &("," / ")" / ":"))
/ (expression)
try_stmt =
("try" ":" block finally_block)
/ ("try" ":" block except_block+ (else_block)? (finally_block)?)
except_block =
("except" expression ("as" NAME )? ":" block)
/ ("except" ":" block)
finally_block =
("finally" ":" block)
match_stmt =
("match" subject_expr ":" NEWLINE INDENT case_block+ DEDENT)
subject_expr =
(star_named_expression "," star_named_expressions?)
/ (named_expression)
case_block =
("case" patterns guard? ":" block)
guard = "if" named_expression
patterns =
(open_sequence_pattern)
/ (pattern)
pattern =
(as_pattern)
/ (or_pattern)
as_pattern =
(or_pattern "as" pattern_capture_target)
or_pattern =
((closed_pattern ("|" closed_pattern)*))
closed_pattern =
(literal_pattern)
/ (capture_pattern)
/ (wildcard_pattern)
/ (value_pattern)
/ (group_pattern)
/ (sequence_pattern)
/ (mapping_pattern)
/ (class_pattern)
# Literal patterns are used for equality and identity constraints
literal_pattern =
(signed_number !("+" / "-"))
/ (complex_number)
/ (strings)
/ ("None")
/ ("True")
/ ("False")
# Literal expressions are used to restrict permitted mapping pattern keys
literal_expr =
(signed_number !("+" / "-"))
/ (complex_number)
/ (strings)
/ ("None")
/ ("True")
/ ("False")
complex_number =
(signed_real_number "+" imaginary_number)
/ (signed_real_number "-" imaginary_number)
signed_number =
(NUMBER)
/ ("-" NUMBER)
signed_real_number =
(real_number)
/ ("-" real_number)
real_number =
(NUMBER)
imaginary_number =
(NUMBER)
capture_pattern =
(pattern_capture_target)
pattern_capture_target =
(!"_" NAME !("." / "(" / "="))
wildcard_pattern =
("_")
value_pattern =
(attr !("." / "(" / "="))
attr =
(name_or_attr "." NAME)
name_or_attr =
(attr)
/ (NAME)
group_pattern =
("(" pattern ")")
sequence_pattern =
("[" maybe_sequence_pattern? "]")
/ ("(" open_sequence_pattern? ")")
open_sequence_pattern =
(maybe_star_pattern "," maybe_sequence_pattern?)
maybe_sequence_pattern =
((maybe_star_pattern ("," maybe_star_pattern)*) ","?)
maybe_star_pattern =
(star_pattern)
/ (pattern)
star_pattern =
("*" pattern_capture_target)
/ ("*" wildcard_pattern)
mapping_pattern =
("{" "}")
/ ("{" double_star_pattern ","? "}")
/ ("{" items_pattern "," double_star_pattern ","? "}")
/ ("{" items_pattern ","? "}")
items_pattern =
((key_value_pattern ("," key_value_pattern)*))
key_value_pattern =
((literal_expr / attr) ":" pattern)
double_star_pattern =
("**" pattern_capture_target)
class_pattern =
(name_or_attr "(" ")")
/ (name_or_attr "(" positional_patterns ","? ")")
/ (name_or_attr "(" keyword_patterns ","? ")")
/ (name_or_attr "(" positional_patterns "," keyword_patterns ","? ")")
positional_patterns =
((pattern ("," pattern)*))
keyword_patterns =
((keyword_pattern ("," keyword_pattern)*))
keyword_pattern =
(NAME "=" pattern)
return_stmt =
("return" (star_expressions)?)
raise_stmt =
("raise" expression ("from" expression )?)
/ ("raise")
function_def =
(decorators function_def_raw)
/ (function_def_raw)
function_def_raw =
("def" NAME "(" (params)? ")" ("->" expression )? ":" (func_type_comment)? block)
/ (ASYNC "def" NAME "(" (params)? ")" ("->" expression )? ":" (func_type_comment)? block)
func_type_comment =
(NEWLINE TYPE_COMMENT &(NEWLINE INDENT)) # Must be followed by indented block
/ (TYPE_COMMENT)
params =
(parameters)
parameters =
(slash_no_default param_no_default* param_with_default* (star_etc)?)
/ (slash_with_default param_with_default* (star_etc)?)
/ (param_no_default+ param_with_default* (star_etc)?)
/ (param_with_default+ (star_etc)?)
/ (star_etc)
# Some duplication here because we can't write ("," / &")"),
# which is because we don't support empty alternatives (yet).
#
slash_no_default =
(param_no_default+ "/" ",")
/ (param_no_default+ "/" &")")
slash_with_default =
(param_no_default* param_with_default+ "/" ",")
/ (param_no_default* param_with_default+ "/" &")")
star_etc =
("*" param_no_default param_maybe_default* (kwds)?)
/ ("*" "," param_maybe_default+ (kwds)?)
/ (kwds)
kwds = "**" param_no_default
# One parameter. This *includes* a following comma and type comment.
#
# There are three styles:
# - No default
# - With default
# - Maybe with default
#
# There are two alternative forms of each, to deal with type comments:
# - Ends in a comma followed by an optional type comment
# - No comma, optional type comment, must be followed by close paren
# The latter form is for a final parameter without trailing comma.
#
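    # Illustrative examples (comment only, added here): parameter lists and the
    # rules they exercise, e.g.
    #   def f(a, b): ...            -> param_no_default+
    #   def f(a, b=1, *args): ...   -> param_no_default+ param_with_default* star_etc
    #   def f(a, /, b, *, c): ...   -> slash_no_default ... star_etc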
param_no_default =
(param "," TYPE_COMMENT?)
/ (param TYPE_COMMENT? &")")
param_with_default =
(param default_expr "," TYPE_COMMENT?)
/ (param default_expr TYPE_COMMENT? &")")
param_maybe_default =
(param default_expr? "," TYPE_COMMENT?)
/ (param default_expr? TYPE_COMMENT? &")")
param = NAME annotation?
annotation = ":" expression
default_expr = "=" expression
decorators = ("@" named_expression NEWLINE )+
class_def =
(decorators ClassDeclaration)
/ (ClassDeclaration)
ClassDeclaration =
("class" NAME ("(" arguments? ")")? ":" block)
block =
(NEWLINE INDENT statements DEDENT)
/ (simple_stmts)
star_expressions =
(star_expression ("," star_expression )+ (",")?)
/ (star_expression ",")
/ (star_expression)
star_expression =
("*" bitwise_or)
/ (expression)
star_named_expressions = (star_named_expression ("," star_named_expression)*) (",")?
star_named_expression =
("*" bitwise_or)
/ (named_expression)
assignment_expression =
(NAME ":=" expression)
named_expression =
(assignment_expression)
/ (expression !":=")
annotated_rhs = yield_expr / star_expressions
expressions =
(expression ("," expression )+ (",")?)
/ (expression ",")
/ (expression)
expression =
(disjunction "if" disjunction "else" expression)
/ (disjunction)
/ (lambdef)
lambdef =
("lambda" (lambda_params)? ":" expression)
lambda_params =
(lambda_parameters)
# lambda_parameters etc. duplicates parameters but without annotations
# or type comments, and if there's no comma after a parameter, we expect
# a colon, not a close parenthesis. (For more, see parameters above.)
#
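    # e.g. "lambda x, y=1, *args, **kw: x + y" exercises lambda_param_no_default,
    # lambda_param_with_default and lambda_star_etc (illustrative comment only).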
lambda_parameters =
(lambda_slash_no_default lambda_param_no_default* lambda_param_with_default* (lambda_star_etc)?)
/ (lambda_slash_with_default lambda_param_with_default* (lambda_star_etc)?)
/ (lambda_param_no_default+ lambda_param_with_default* (lambda_star_etc)?)
/ (lambda_param_with_default+ (lambda_star_etc)?)
/ (lambda_star_etc)
lambda_slash_no_default =
(lambda_param_no_default+ "/" ",")
/ (lambda_param_no_default+ "/" &":")
lambda_slash_with_default =
(lambda_param_no_default* lambda_param_with_default+ "/" ",")
/ (lambda_param_no_default* lambda_param_with_default+ "/" &":")
lambda_star_etc =
("*" lambda_param_no_default lambda_param_maybe_default* (lambda_kwds)?)
/ ("*" "," lambda_param_maybe_default+ (lambda_kwds)?)
/ (lambda_kwds)
lambda_kwds = "**" lambda_param_no_default
lambda_param_no_default =
(lambda_param ",")
/ (lambda_param &":")
lambda_param_with_default =
(lambda_param default_expr ",")
/ (lambda_param default_expr &":")
lambda_param_maybe_default =
(lambda_param default_expr? ",")
/ (lambda_param default_expr? &":")
lambda_param = NAME
disjunction =
(conjunction ("or" conjunction )+)
/ (conjunction)
conjunction =
(inversion ("and" inversion )+)
/ (inversion)
inversion =
("not" inversion)
/ (comparison)
comparison =
(bitwise_or compare_op_bitwise_or_pair+)
/ (bitwise_or)
    compare_op_bitwise_or_pair
import tensorflow as tf
import algos_tf14.models
from common import tr_helpers, experience, env_configurations
import numpy as np
import collections
import time
from collections import deque
from tensorboardX import SummaryWriter
from datetime import datetime
from algos_tf14.tensorflow_utils import TensorFlowVariables
from common.categorical import CategoricalQ
class DQNAgent:
def __init__(self, sess, base_name, observation_space, action_space, config, logger, central_state_space=None):
observation_shape = observation_space.shape
actions_num = action_space.n
self.config = config
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.games_to_track = tr_helpers.get_or_default(config, 'games_to_track', 100)
self.max_epochs = tr_helpers.get_or_default(self.config, 'max_epochs', 1e6)
self.game_rewards = deque([], maxlen=self.games_to_track)
self.game_lengths = deque([], maxlen=self.games_to_track)
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
if self.is_adaptive_lr:
self.lr_threshold = config['lr_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, end_learning_rate=0.001, power=tr_helpers.get_or_default(config, 'decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, decay_rate = config['decay_rate'])
self.env_name = config['env_name']
self.network = config['network']
self.state_shape = observation_shape
self.actions_num = actions_num
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("%d, %H:%M:%S"))
self.epsilon = self.config['epsilon']
self.rewards_shaper = self.config['reward_shaper']
self.epsilon_processor = tr_helpers.LinearValueProcessor(self.config['epsilon'], self.config['min_epsilon'], self.config['epsilon_decay_frames'])
self.beta_processor = tr_helpers.LinearValueProcessor(self.config['priority_beta'], self.config['max_beta'], self.config['beta_decay_frames'])
if self.env_name:
self.env = env_configurations.configurations[self.env_name]['env_creator'](name=config['name'])
self.sess = sess
self.steps_num = self.config['steps_num']
self.states = deque([], maxlen=self.steps_num)
self.is_prioritized = config['replay_buffer_type'] != 'normal'
self.atoms_num = self.config['atoms_num']
self.is_categorical = self.atoms_num > 1
if self.is_categorical:
self.v_min = self.config['v_min']
self.v_max = self.config['v_max']
self.delta_z = (self.v_max - self.v_min) / (self.atoms_num - 1)
self.all_z = tf.range(self.v_min, self.v_max + self.delta_z, self.delta_z)
self.categorical = CategoricalQ(self.atoms_num, self.v_min, self.v_max)
self.n_agents = self.env.env_info['n_agents']
if not self.is_prioritized:
self.exp_buffer = experience.ReplayBuffer(config['replay_buffer_size'], observation_space, self.n_agents)
else:
self.exp_buffer = experience.PrioritizedReplayBuffer(config['replay_buffer_size'], config['priority_alpha'], observation_space, self.n_agents)
self.sample_weights_ph = tf.placeholder(tf.float32, shape= [None,] , name='sample_weights')
self.obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'obs_ph')
self.actions_ph = tf.placeholder(tf.int32, shape=[None,], name = 'actions_ph')
self.rewards_ph = tf.placeholder(tf.float32, shape=[None,], name = 'rewards_ph')
self.next_obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'next_obs_ph')
self.is_done_ph = tf.placeholder(tf.float32, shape=[None,], name = 'is_done_ph')
self.is_not_done = 1 - self.is_done_ph
self.name = base_name
self.gamma = self.config['gamma']
self.gamma_step = self.gamma**self.steps_num
self.input_obs = self.obs_ph
self.input_next_obs = self.next_obs_ph
if observation_space.dtype == np.uint8:
print('scaling obs')
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_next_obs = tf.to_float(self.input_next_obs) / 255.0
if self.atoms_num == 1:
self.setup_qvalues(actions_num)
else:
self.setup_cat_qvalues(actions_num)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
self.saver = tf.train.Saver()
self.assigns_op = [tf.assign(w_target, w_self, validate_shape=True) for w_self, w_target in zip(self.weights, self.target_weights)]
self.variables = TensorFlowVariables(self.qvalues, self.sess)
if self.env_name:
sess.run(tf.global_variables_initializer())
self._reset()
def _get_q(self, probs):
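        # Expected value of the categorical value distribution:
        # Q(s, a) = sum_i p_i(s, a) * z_i, where `all_z` holds the atom support
        # values and axis 2 is the atoms dimension.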
res = probs * self.all_z
return tf.reduce_sum(res, axis=2)
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def setup_cat_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.logits = self.network(config, reuse=False)
self.qvalues_c = tf.nn.softmax(self.logits, axis = 2)
self.qvalues = self._get_q(self.qvalues_c)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_logits = self.network(config, reuse=False)
self.target_qvalues_c = tf.nn.softmax(self.target_logits, axis = 2)
self.target_qvalues = self._get_q(self.target_qvalues_c)
if self.config['is_double'] == True:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_logits = tf.stop_gradient(self.network(config, reuse=True))
self.next_qvalues_c = tf.nn.softmax(self.next_logits, axis = 2)
self.next_qvalues = self._get_q(self.next_qvalues_c)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_values = tf.reduce_sum(tf.expand_dims(tf.one_hot(self.actions_ph, actions_num), -1) * self.logits, reduction_indices = (1,))
if self.config['is_double'] == True:
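            # Double DQN: the online ('agent') network selects the greedy next action,
            # while the target network supplies the value distribution used in the target.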
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
else:
self.next_selected_actions = tf.argmax(self.target_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
self.proj_dir_ph = tf.placeholder(tf.float32, shape=[None, self.atoms_num], name = 'best_proj_dir')
log_probs = tf.nn.log_softmax( self.current_action_values, axis=1)
if self.is_prioritized:
# we need to return loss to update priority buffer
self.abs_errors = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1) + 1e-5
self.td_loss = self.abs_errors * self.sample_weights_ph
else:
self.td_loss = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1)
self.td_loss_mean = tf.reduce_mean(self.td_loss)
def setup_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.qvalues = self.network(config, reuse=False)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_qvalues = tf.stop_gradient(self.network(config, reuse=False))
if self.config['is_double'] == True:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_qvalues = tf.stop_gradient(self.network(config, reuse=True))
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_qvalues = tf.reduce_sum(tf.one_hot(self.actions_ph, actions_num) * self.qvalues, reduction_indices = 1)
if self.config['is_double'] == True:
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( self.target_qvalues * self.next_selected_actions_onehot , reduction_indices=[1,] ))
else:
self.next_state_values_target = tf.stop_gradient(tf.reduce_max(self.target_qvalues, reduction_indices=1))
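        # n-step TD target: R^(n) + gamma^n * (1 - done) * Q_target(s_{t+n}, a*),
        # with gamma_step = gamma ** steps_num precomputed in __init__.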
        self.reference_qvalues = self.rewards_ph + self.gamma_step * self.is_not_done * self.next_state_values_target
if self.is_prioritized:
# we need to return l1 loss to update priority buffer
self.abs_errors = tf.abs(self.current_action_qvalues - self.reference_qvalues) + 1e-5
# the same as multiply gradients later (other way is used in different examples over internet)
self.td_loss = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.NONE) * self.sample_weights_ph
self.td_loss_mean = tf.reduce_mean(self.td_loss)
else:
self.td_loss_mean = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.MEAN)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
if self.env_name:
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def _reset(self):
self.states.clear()
if self.env_name:
self.state = self.env.reset()
self.total_reward = 0.0
self.total_shaped_reward = 0.0
self.step_count = 0
def get_qvalues(self, state):
return self.sess.run(self.qvalues, {self.obs_ph: state})
def get_action(self, state, epsilon=0.0):
if np.random.random() < epsilon:
action = self.env.action_space.sample()
else:
qvals = self.get_qvalues([state])
action = np.argmax(qvals)
return action
def play_steps(self, steps, epsilon=0.0):
done_reward = None
done_shaped_reward = None
done_steps = None
steps_rewards = 0
cur_gamma = 1
cur_states_len = len(self.states)
# always break after one
while True:
if cur_states_len > 0:
state = self.states[-1][0]
else:
state = self.state
action = self.get_action(state, epsilon)
new_state, reward, is_done, _ = self.env.step(action)
#reward = reward * (1 - is_done)
self.step_count += 1
self.total_reward += reward
shaped_reward = self.rewards_shaper(reward)
self.total_shaped_reward += shaped_reward
self.states.append([new_state, action, shaped_reward])
if len(self.states) < steps:
break
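            # Fold the buffered shaped rewards into a discounted n-step return:
            # R = r_t + gamma * r_{t+1} + ... + gamma^(n-1) * r_{t+n-1}.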
for i in range(steps):
sreward = self.states[i][2]
steps_rewards += sreward * cur_gamma
cur_gamma = cur_gamma * self.gamma
next_state, current_action, _ = self.states[0]
self.exp_buffer.add(self.state, current_action, steps_rewards, new_state, is_done)
self.state = next_state
break
if is_done:
done_reward = self.total_reward
done_steps = self.step_count
done_shaped_reward = self.total_shaped_reward
self._reset()
return done_reward, done_shaped_reward, done_steps
def load_weigths_into_target_network(self):
self.sess.run(self.assigns_op)
def sample_batch(self, exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch
}
def sample_prioritized_batch(self, exp_replay, batch_size, beta):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch, sample_weights, sample_idxes = exp_replay.sample(batch_size, beta)
batch = { self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch, self.sample_weights_ph: sample_weights }
return [batch , sample_idxes]
def train(self):
mem_free_steps = 0
last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
self.load_weigths_into_target_network()
for _ in range(0, self.config['num_steps_fill_buffer']):
self.play_steps(self.steps_num, self.epsilon)
steps_per_epoch = self.config['steps_per_epoch']
num_epochs_to_copy = self.config['num_epochs_to_copy']
batch_size = self.config['batch_size']
lives_reward = self.config['lives_reward']
episodes_to_log = self.config['episodes_to_log']
frame = 0
play_time = 0
update_time = 0
rewards = []
shaped_rewards = []
steps = []
losses = deque([], maxlen=100)
while True:
epoch_num = self.update_epoch()
t_play_start = time.time()
self.epsilon = self.epsilon_processor(frame)
self.beta = self.beta_processor(frame)
for _ in range(0, steps_per_epoch):
reward, shaped_reward, step = self.play_steps(self.steps_num, self.epsilon)
                if reward is not None:
self.game_lengths.append(step)
self.game_rewards.append(reward)
#shaped_rewards.append(shaped_reward)
t_play_end = time.time()
play_time += t_play_end - t_play_start
# train
frame = frame + steps_per_epoch
t_start = time.time()
if self.is_categorical:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.steps_num)
batch[self.proj_dir_ph] = projected
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.steps_num)
batch[self.proj_dir_ph] = projected
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
else:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
losses.append(loss_t)
t_end = time.time()
update_time += t_end - t_start
total_time += update_time
if frame % 1000 == 0:
                mem_free_steps +=
OS '
'Independent\\nClassifier: Programming Language :: Python\\nClassifier: '
'Programming Language :: Python :: 2\\nClassifier: Programming Language :: '
'Python :: 2.7\\nClassifier: Programming Language :: Python :: '
'3\\nClassifier: Programming Language :: Python :: 3.5\\nClassifier: '
'Programming Language :: Python :: 3.6\\nClassifier: Programming Language :: '
'Python :: 3.7\\nClassifier: Programming Language :: Python :: '
'3.8\\nClassifier: Programming Language :: Python :: 3.9\\nClassifier: '
'Programming Language :: Python :: Implementation :: CPython\\nClassifier: '
'Programming Language :: Python :: Implementation :: PyPy\\nClassifier: '
'Topic :: Software Development :: Libraries :: Python '
'Modules\\nRequires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, '
'!=3.3.*\\nDescription-Content-Type: text/x-rst\\nProvides-Extra: '
'dev\\nRequires-Dist: coverage[toml] (>=5.0.2) ; extra == '
"'dev'\\nRequires-Dist: hypothesis ; extra == 'dev'\\nRequires-Dist: pympler "
"; extra == 'dev'\\nRequires-Dist: pytest (>=4.3.0) ; extra == "
"'dev'\\nRequires-Dist: six ; extra == 'dev'\\nRequires-Dist: zope.interface "
"; extra == 'dev'\\nRequires-Dist: sphinx ; extra == 'dev'\\nRequires-Dist: "
"sphinx-rtd-theme ; extra == 'dev'\\nRequires-Dist: pre-commit ; extra == "
"'dev'\\nProvides-Extra: docs\\nRequires-Dist: sphinx ; extra == "
"'docs'\\nRequires-Dist: sphinx-rtd-theme ; extra == 'docs'\\nRequires-Dist: "
"zope.interface ; extra == 'docs'\\nProvides-Extra: tests\\nRequires-Dist: "
"coverage[toml] (>=5.0.2) ; extra == 'tests'\\nRequires-Dist: hypothesis ; "
"extra == 'tests'\\nRequires-Dist: pympler ; extra == "
"'tests'\\nRequires-Dist: pytest (>=4.3.0) ; extra == "
"'tests'\\nRequires-Dist: six ; extra == 'tests'\\nRequires-Dist: "
"zope.interface ; extra == 'tests'\\nProvides-Extra: "
'tests_no_zope\\nRequires-Dist: coverage[toml] (>=5.0.2) ; extra == '
"'tests_no_zope'\\nRequires-Dist: hypothesis ; extra == "
"'tests_no_zope'\\nRequires-Dist: pympler ; extra == "
"'tests_no_zope'\\nRequires-Dist: pytest (>=4.3.0) ; extra == "
"'tests_no_zope'\\nRequires-Dist: six ; extra == 'tests_no_zope'\\n\\n.. "
'image:: https://www.attrs.org/en/latest/_static/attrs_logo.png\\n :alt: '
'attrs Logo\\n\\n======================================\\n``attrs``: Classes '
'Without Boilerplate\\n======================================\\n\\n.. '
'image:: https://readthedocs.org/projects/attrs/badge/?version=stable\\n '
':target: https://www.attrs.org/en/stable/?badge=stable\\n :alt: '
'Documentation Status\\n\\n.. image:: '
'https://github.com/python-attrs/attrs/workflows/CI/badge.svg?branch=master\\n '
':target: https://github.com/python-attrs/attrs/actions?workflow=CI\\n '
':alt: CI Status\\n\\n.. image:: '
'https://codecov.io/github/python-attrs/attrs/branch/master/graph/badge.svg\\n '
':target: https://codecov.io/github/python-attrs/attrs\\n :alt: Test '
'Coverage\\n\\n.. image:: '
'https://img.shields.io/badge/code%20style-black-000000.svg\\n :target: '
'https://github.com/psf/black\\n :alt: Code style: black\\n\\n.. '
'teaser-begin\\n\\n``attrs`` is the Python package that will bring back the '
'**joy** of **writing classes** by relieving you from the drudgery of '
'implementing object protocols (aka `dunder '
'<https://nedbatchelder.com/blog/200605/dunder.html>`_ methods).\\n\\nIts '
'main goal is to help you to write **concise** and **correct** software '
'without slowing down your code.\\n\\n.. teaser-end\\n\\nFor that, it gives '
'you a class decorator and a way to declaratively define the attributes on '
'that class:\\n\\n.. -code-begin-\\n\\n.. code-block:: pycon\\n\\n >>> '
'import attr\\n\\n >>> @attr.s\\n ... class SomeClass(object):\\n '
'... a_number = attr.ib(default=42)\\n ... list_of_numbers = '
'attr.ib(factory=list)\\n ...\\n ... def hard_math(self, '
'another_number):\\n ... return self.a_number + '
'sum(self.list_of_numbers) * another_number\\n\\n\\n >>> sc = SomeClass(1, '
'[1, 2, 3])\\n >>> sc\\n SomeClass(a_number=1, list_of_numbers=[1, 2, '
'3])\\n\\n >>> sc.hard_math(3)\\n 19\\n >>> sc == SomeClass(1, [1, 2, '
'3])\\n True\\n >>> sc != SomeClass(2, [3, 2, 1])\\n True\\n\\n >>> '
"attr.asdict(sc)\\n {'a_number': 1, 'list_of_numbers': [1, 2, 3]}\\n\\n "
'>>> SomeClass()\\n SomeClass(a_number=42, list_of_numbers=[])\\n\\n >>> '
'C = attr.make_class(\\"C\\", [\\"a\\", \\"b\\"])\\n >>> C(\\"foo\\", '
'\\"bar\\")\\n C(a=\'foo\', b=\'bar\')\\n\\n\\nAfter *declaring* your '
'attributes ``attrs`` gives you:\\n\\n- a concise and explicit overview of '
"the class's attributes,\\n- a nice human-readable ``__repr__``,\\n- a "
'complete set of comparison methods (equality and ordering),\\n- an '
'initializer,\\n- and much more,\\n\\n*without* writing dull boilerplate '
'code again and again and *without* runtime performance penalties.\\n\\nOn '
'Python 3.6 and later, you can often even drop the calls to ``attr.ib()`` by '
'using `type annotations '
'<https://www.attrs.org/en/latest/types.html>`_.\\n\\nThis gives you the '
'power to use actual classes with actual types in your code instead of '
'confusing ``tuple``\\\\ s or `confusingly behaving '
'<https://www.attrs.org/en/stable/why.html#namedtuples>`_ ``namedtuple``\\\\ '
's.\\nWhich in turn encourages you to write *small classes* that do `one '
'thing well <https://www.destroyallsoftware.com/talks/boundaries>`_.\\nNever '
'again violate the `single responsibility principle '
'<https://en.wikipedia.org/wiki/Single_responsibility_principle>`_ just '
'because implementing ``__init__`` et al is a painful drag.\\n\\n\\n.. '
'-getting-help-\\n\\nGetting Help\\n============\\n\\nPlease use the '
'``python-attrs`` tag on `StackOverflow '
'<https://stackoverflow.com/questions/tagged/python-attrs>`_ to get '
'help.\\n\\nAnswering questions of your fellow developers is also great way '
'to help the project!\\n\\n\\n.. -project-information-\\n\\nProject '
'Information\\n===================\\n\\n``attrs`` is released under the `MIT '
'<https://choosealicense.com/licenses/mit/>`_ license,\\nits documentation '
'lives at `Read the Docs <https://www.attrs.org/>`_,\\nthe code on `GitHub '
'<https://github.com/python-attrs/attrs>`_,\\nand the latest release on '
'`PyPI <https://pypi.org/project/attrs/>`_.\\nIt\\u2019s rigorously tested '
'on Python 2.7, 3.5+, and PyPy.\\n\\nWe collect information on **third-party '
'extensions** in our `wiki '
'<https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs>`_.\\nFeel '
"free to browse and add your own!\\n\\nIf you'd like to contribute to "
"``attrs`` you're most welcome and we've written `a little guide "
'<https://www.attrs.org/en/latest/contributing.html>`_ to get you '
'started!\\n\\n\\n``attrs`` for '
'Enterprise\\n------------------------\\n\\nAvailable as part of the '
'Tidelift Subscription.\\n\\nThe maintainers of ``attrs`` and thousands of '
'other packages are working with Tidelift to deliver commercial support and '
'maintenance for the open source packages you use to build your '
'applications.\\nSave time, reduce risk, and improve code health, while '
'paying the maintainers of the exact packages you use.\\n`Learn more. '
'<https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=referral&utm_campaign=enterprise&utm_term=repo>`_\\n\\n\\nRelease '
'Information\\n===================\\n\\n20.2.0 '
'(2020-09-05)\\n-------------------\\n\\nBackward-incompatible '
'Changes\\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n- ``attr.define()``, '
'``attr.frozen()``, ``attr.mutable()``, and ``attr.field()`` remain '
'**provisional**.\\n\\n This release fixes a bunch of bugs and ergonomics '
'but they remain mostly unchanged.\\n\\n If you wish to use them together '
'with mypy, you can simply drop `this plugin '
'<https://gist.github.com/hynek/1e3844d0c99e479e716169034b5fa963#file-attrs_ng_plugin-py>`_ '
'into your project.\\n\\n Feel free to provide feedback to them in the '
'linked issue #668.\\n\\n We will release the ``attrs`` namespace once we '
'have the feeling that the APIs have properly settled.\\n `#668 '
'<https://github.com/python-attrs/attrs/issues/668>`_\\n\\n\\nChanges\\n^^^^^^^\\n\\n- '
'``attr.define()`` et al now correct detect ``__eq__`` and ``__ne__``.\\n '
'`#671 <https://github.com/python-attrs/attrs/issues/671>`_\\n- '
"``attr.define()`` et al's hybrid behavior now also works correctly when "
'arguments are passed.\\n `#675 '
"<https://github.com/python-attrs/attrs/issues/675>`_\\n- It's possible to "
'define custom ``__setattr__`` methods on slotted classes again.\\n `#681 '
'<https://github.com/python-attrs/attrs/issues/681>`_\\n- In 20.1.0 we '
'introduced the ``inherited`` attribute on the ``attr.Attribute`` class to '
'differentiate attributes that have been inherited and those that have been '
'defined directly on the class.\\n\\n It has shown to be problematic to '
'involve that attribute when comparing instances of ``attr.Attribute`` '
'though, because when sub-classing, attributes from base classes are '
'suddenly not equal to themselves in a super class.\\n\\n Therefore the '
'``inherited`` attribute will now be ignored when hashing and comparing '
'instances of ``attr.Attribute``.\\n `#684 '
'<https://github.com/python-attrs/attrs/issues/684>`_\\n- ``zope.interface`` '
'is now a \\"soft dependency\\" when running the test suite; if '
'``zope.interface`` is not installed when running the test suite, the '
'interface-related tests will be automatically skipped.\\n `#685 '
'<https://github.com/python-attrs/attrs/issues/685>`_\\n- The ergonomics of '
'creating frozen classes using ``@define(frozen=True)`` and sub-classing '
"frozen classes has been improved:\\n you don't have to set "
'``on_setattr=None`` anymore.\\n `#687 '
'<https://github.com/python-attrs/attrs/issues/687>`_\\n\\n`Full changelog '
'<https://www.attrs.org/en/stable/changelog.html>`_.\\n\\nCredits\\n=======\\n\\n``attrs`` '
'is written and maintained by `<NAME> '
'<https://hynek.me/>`_.\\n\\nThe development is kindly supported by '
'`Variomedia AG <https://www.variomedia.de/>`_.\\n\\nA full list of '
"contributors can be found in `GitHub's overview "
'<https://github.com/python-attrs/attrs/graphs/contributors>`_.\\n\\nIt\\u2019s '
'the spiritual successor of `characteristic '
'<https://characteristic.readthedocs.io/>`_ and aspires to fix some of it '
'clunkiness and unfortunate decisions.\\nBoth were inspired by '
'Twisted\\u2019s `FancyEqMixin '
'<https://twistedmatrix.com/documents/current/api/twisted.python.util.FancyEqMixin.html>`_ '
'but both are implemented using class decorators because `subclassing is bad '
'for you <https://www.youtube.com/watch?v=3MNVP9-hglc>`_, '
'm\\u2019kay?\\n\\n\\n", "origin": "<NAME> <<EMAIL>>"}'),
('/usr/lib/python3.8/site-packages/six',
'{"name": "six", "version": "1.15.0", "type": "python", "location": '
'"/usr/lib/python3.8/site-packages", "files": '
'["/usr/lib/python3.8/site-packages/__pycache__/six.cpython-38.pyc", '
'"/usr/lib/python3.8/site-packages/six-1.15.0.dist-info/INSTALLER", '
'"/usr/lib/python3.8/site-packages/six-1.15.0.dist-info/LICENSE", '
'"/usr/lib/python3.8/site-packages/six-1.15.0.dist-info/METADATA", '
'"/usr/lib/python3.8/site-packages/six-1.15.0.dist-info/RECORD", '
'"/usr/lib/python3.8/site-packages/six-1.15.0.dist-info/WHEEL", '
'"/usr/lib/python3.8/site-packages/six-1.15.0.dist-info/top_level.txt", '
'"/usr/lib/python3.8/site-packages/six.py"], "license": "MIT", "metadata": '
'"Metadata-Version: 2.1\\nName: six\\nVersion: 1.15.0\\nSummary: Python 2 '
'and 3 compatibility utilities\\nHome-page: '
'https://github.com/benjaminp/six\\nAuthor: Benjamin '
'Peterson\\nAuthor-email: <EMAIL>\\nLicense: MIT\\nPlatform: '
'UNKNOWN\\nClassifier: Development Status :: 5 - '
'Production/Stable\\nClassifier: Programming Language :: Python :: '
'2\\nClassifier: Programming Language :: Python :: 3\\nClassifier: Intended '
'Audience :: Developers\\nClassifier: License :: OSI Approved :: MIT '
'License\\nClassifier: Topic :: Software Development :: '
'Libraries\\nClassifier: Topic :: Utilities\\nRequires-Python: >=2.7, '
'!=3.0.*, !=3.1.*, !=3.2.*\\n\\n.. image:: '
'https://img.shields.io/pypi/v/six.svg\\n :target: '
'https://pypi.org/project/six/\\n :alt: six on PyPI\\n\\n.. image:: '
'https://travis-ci.org/benjaminp/six.svg?branch=master\\n :target: '
'https://travis-ci.org/benjaminp/six\\n :alt: six on TravisCI\\n\\n.. '
'image:: https://readthedocs.org/projects/six/badge/?version=latest\\n '
":target: https://six.readthedocs.io/\\n :alt: six's documentation on Read "
'the Docs\\n\\n.. image:: '
'https://img.shields.io/badge/license-MIT-green.svg\\n :target: '
'https://github.com/benjaminp/six/blob/master/LICENSE\\n :alt: MIT License '
 'badge\\n\\nSix
in a new "
'execution\n'
'frame (see section Naming and binding), using a newly created '
'local\n'
'namespace and the original global namespace. (Usually, the '
'suite\n'
"contains only function definitions.) When the class's suite "
'finishes\n'
'execution, its execution frame is discarded but its local '
'namespace is\n'
'saved. [4] A class object is then created using the inheritance '
'list\n'
'for the base classes and the saved local namespace for the '
'attribute\n'
'dictionary. The class name is bound to this class object in '
'the\n'
'original local namespace.\n'
'\n'
"**Programmer's note:** Variables defined in the class definition "
'are\n'
'class variables; they are shared by all instances. To create '
'instance\n'
'variables, they can be set in a method with "self.name = '
'value". Both\n'
'class and instance variables are accessible through the '
'notation\n'
'""self.name"", and an instance variable hides a class variable '
'with\n'
'the same name when accessed in this way. Class variables can be '
'used\n'
'as defaults for instance variables, but using mutable values '
'there can\n'
'lead to unexpected results. For *new-style class*es, '
'descriptors can\n'
'be used to create instance variables with different '
'implementation\n'
'details.\n'
'\n'
'Class definitions, like function definitions, may be wrapped by '
'one or\n'
'more *decorator* expressions. The evaluation rules for the '
'decorator\n'
'expressions are the same as for functions. The result must be a '
'class\n'
'object, which is then bound to the class name.\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
'[1] The exception is propagated to the invocation stack unless\n'
' there is a "finally" clause which happens to raise another\n'
' exception. That new exception causes the old one to be '
'lost.\n'
'\n'
'[2] Currently, control "flows off the end" except in the case '
'of\n'
' an exception or the execution of a "return", "continue", or\n'
' "break" statement.\n'
'\n'
'[3] A string literal appearing as the first statement in the\n'
' function body is transformed into the function\'s "__doc__"\n'
" attribute and therefore the function's *docstring*.\n"
'\n'
'[4] A string literal appearing as the first statement in the '
'class\n'
' body is transformed into the namespace\'s "__doc__" item '
'and\n'
" therefore the class's *docstring*.\n",
'context-managers': '\n'
'With Statement Context Managers\n'
'*******************************\n'
'\n'
'New in version 2.5.\n'
'\n'
'A *context manager* is an object that defines the '
'runtime context to\n'
'be established when executing a "with" statement. The '
'context manager\n'
'handles the entry into, and the exit from, the desired '
'runtime context\n'
'for the execution of the block of code. Context '
'managers are normally\n'
'invoked using the "with" statement (described in section '
'The with\n'
'statement), but can also be used by directly invoking '
'their methods.\n'
'\n'
'Typical uses of context managers include saving and '
'restoring various\n'
'kinds of global state, locking and unlocking resources, '
'closing opened\n'
'files, etc.\n'
'\n'
'For more information on context managers, see Context '
'Manager Types.\n'
'\n'
'object.__enter__(self)\n'
'\n'
' Enter the runtime context related to this object. The '
'"with"\n'
" statement will bind this method's return value to the "
'target(s)\n'
' specified in the "as" clause of the statement, if '
'any.\n'
'\n'
'object.__exit__(self, exc_type, exc_value, traceback)\n'
'\n'
' Exit the runtime context related to this object. The '
'parameters\n'
' describe the exception that caused the context to be '
'exited. If the\n'
' context was exited without an exception, all three '
'arguments will\n'
' be "None".\n'
'\n'
' If an exception is supplied, and the method wishes to '
'suppress the\n'
' exception (i.e., prevent it from being propagated), '
'it should\n'
' return a true value. Otherwise, the exception will be '
'processed\n'
' normally upon exit from this method.\n'
'\n'
' Note that "__exit__()" methods should not reraise the '
'passed-in\n'
" exception; this is the caller's responsibility.\n"
'\n'
'See also:\n'
'\n'
' **PEP 343** - The "with" statement\n'
' The specification, background, and examples for the '
'Python "with"\n'
' statement.\n',
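 # Illustrative comment (added; not part of the pydoc topic data): a minimal class
 # implementing the __enter__/__exit__ protocol described in the entry above.
 # `acquire`, `release` and `use` are placeholder names.
 #
 #     class managed_resource(object):
 #         def __enter__(self):
 #             self.handle = acquire()
 #             return self.handle
 #         def __exit__(self, exc_type, exc_value, traceback):
 #             release(self.handle)
 #             return False   # do not suppress exceptions
 #
 #     with managed_resource() as handle:
 #         use(handle)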
'continue': '\n'
'The "continue" statement\n'
'************************\n'
'\n'
' continue_stmt ::= "continue"\n'
'\n'
'"continue" may only occur syntactically nested in a "for" or '
'"while"\n'
'loop, but not nested in a function or class definition or '
'"finally"\n'
'clause within that loop. It continues with the next cycle of '
'the\n'
'nearest enclosing loop.\n'
'\n'
'When "continue" passes control out of a "try" statement with a\n'
'"finally" clause, that "finally" clause is executed before '
'really\n'
'starting the next loop cycle.\n',
'conversions': '\n'
'Arithmetic conversions\n'
'**********************\n'
'\n'
'When a description of an arithmetic operator below uses the '
'phrase\n'
'"the numeric arguments are converted to a common type," the '
'arguments\n'
'are coerced using the coercion rules listed at Coercion '
'rules. If\n'
'both arguments are standard numeric types, the following '
'coercions are\n'
'applied:\n'
'\n'
'* If either argument is a complex number, the other is '
'converted to\n'
' complex;\n'
'\n'
'* otherwise, if either argument is a floating point number, '
'the\n'
' other is converted to floating point;\n'
'\n'
'* otherwise, if either argument is a long integer, the other '
'is\n'
' converted to long integer;\n'
'\n'
'* otherwise, both must be plain integers and no conversion '
'is\n'
' necessary.\n'
'\n'
'Some additional rules apply for certain operators (e.g., a '
'string left\n'
"argument to the '%' operator). Extensions can define their "
'own\n'
'coercions.\n',
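 # Illustrative comment (added; not part of the pydoc topic data): examples of the
 # coercions described above, under Python 2 semantics.
 #   1 + 2.5    ->  3.5       (int converted to float)
 #   1 + 10L    ->  11L       (int converted to long)
 #   1.5 + 2j   ->  (1.5+2j)  (float converted to complex)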
'customization': '\n'
'Basic customization\n'
'*******************\n'
'\n'
'object.__new__(cls[, ...])\n'
'\n'
' Called to create a new instance of class *cls*. '
'"__new__()" is a\n'
' static method (special-cased so you need not declare it '
'as such)\n'
' that takes the class of which an instance was requested '
'as its\n'
' first argument. The remaining arguments are those '
'passed to the\n'
' object constructor expression (the call to the class). '
'The return\n'
' value of "__new__()" should be the new object instance '
'(usually an\n'
' instance of *cls*).\n'
'\n'
' Typical implementations create a new instance of the '
'class by\n'
' invoking the superclass\'s "__new__()" method using\n'
' "super(currentclass, cls).__new__(cls[, ...])" with '
'appropriate\n'
' arguments and then modifying the newly-created instance '
'as\n'
' necessary before returning it.\n'
'\n'
' If "__new__()" returns an instance of *cls*, then the '
'new\n'
' instance\'s "__init__()" method will be invoked like\n'
' "__init__(self[, ...])", where *self* is the new '
'instance and the\n'
' remaining arguments are the same as were passed to '
'"__new__()".\n'
'\n'
' If "__new__()" does not return an instance of *cls*, '
'then the new\n'
' instance\'s "__init__()" method will not be invoked.\n'
'\n'
' "__new__()" is intended mainly to allow subclasses of '
'immutable\n'
' types (like int, str, or tuple) to customize instance '
'creation. It\n'
' is also commonly overridden in custom metaclasses in '
'order to\n'
' customize class creation.\n'
'\n'
'object.__init__(self[, ...])\n'
'\n'
' Called after the instance has been created (by '
'"__new__()"), but\n'
' before it is returned to the caller. The arguments are '
'those\n'
' passed to the class constructor expression. If a base '
'class has an\n'
' "__init__()" method, the derived class\'s "__init__()" '
'method, if\n'
' any, must explicitly call it to ensure proper '
'initialization of the\n'
' base class part of the instance; for example:\n'
' "BaseClass.__init__(self, [args...])".\n'
'\n'
' Because "__new__()" and "__init__()" work together in '
'constructing\n'
' objects ("__new__()" to create it, and "__init__()" to '
'customise\n'
' it), no non-"None" value may be returned by '
'"__init__()"; doing so\n'
' will cause a "TypeError" to be raised at runtime.\n'
'\n'
'object.__del__(self)\n'
'\n'
' Called when the instance is about to be destroyed. This '
'is also\n'
                  ' called a destructor.
# models/networks.py
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
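# Hedged usage sketch (added; not part of the original file): how the factory
# returned by get_norm_layer is applied to a channel count when building a model.
def _norm_layer_usage_sketch():
    norm_layer = get_norm_layer(norm_type='instance')
    layer = norm_layer(64)  # nn.InstanceNorm2d(64, affine=False, track_running_stats=False)
    return layer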
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
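# Hedged usage sketch (added; not part of the original file): the scheduler is
# normally stepped once per epoch after the optimizer updates. `opt` is whatever
# options object the training script builds (assumed to carry the fields used above).
def _scheduler_usage_sketch(optimizer, opt, n_epochs):
    scheduler = get_scheduler(optimizer, opt)
    for _ in range(n_epochs):
        # ... one epoch of training with `optimizer` would run here ...
        if opt.lr_policy == 'plateau':
            scheduler.step(0.0)  # ReduceLROnPlateau expects a metric value
        else:
            scheduler.step()
    return optimizer.param_groups[0]['lr']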
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[], train=True):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from <NAME>'s neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
    The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
#print(netG)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'oinnlitho':
net = oinnlitho(modes1=50, modes2=50, width=16, in_channel=1, refine_channel=32, refine_kernel=3)
elif netG == 'oinnopc':
net = oinnopc(modes1=50, modes2=50, width=16, in_channel=1, refine_channel=32, refine_kernel=3)
elif netG == 'oinnopcg':
net = oinnopcg()
elif netG == 'oinnopcgv2':
net = oinnopcgv2()
elif netG == 'oinnopc_v001':
net = oinnopc_v001(modes1=50, modes2=50, width=16, in_channel=1, refine_channel=32, refine_kernel=3)
elif netG == 'oinnopc_large':
net = oinnopc_large(modes1=50, modes2=50, width=16, in_channel=1, refine_channel=32, refine_kernel=3)
elif netG == 'oinnopc_multi':
net = oinnopc_multi(modes1=50, modes2=50, width=16, in_channel=1, refine_channel=32, refine_kernel=3, train=train)
elif netG == 'oinnopc_multi_v2':
net = oinnopc_multi_v2(modes1=50, modes2=50, width=16, in_channel=1, refine_channel=32, refine_kernel=3, train=train)
elif netG == 'unet':
net = unet(1, 1, 3, 0.5)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
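# Hedged usage sketch (added; not part of the original file): building one of the
# generators listed above on CPU. Argument values are illustrative only, and
# UnetGenerator is assumed to be defined later in this module.
def _define_g_usage_sketch():
    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='unet_256',
                    norm='instance', use_dropout=False,
                    init_type='normal', init_gain=0.02, gpu_ids=[])
    dummy = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        out = netG(dummy)
    return out.shape  # expected: torch.Size([1, 3, 256, 256])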
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses LeakyReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
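# Illustrative call (added; values are placeholders). With netD='basic' this
# builds the default 3-layer 70x70 PatchGAN described in the docstring above.
# netD = define_D(input_nc=3, ndf=64, netD='basic', norm='batch',
#                 init_type='normal', init_gain=0.02, gpu_ids=[])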
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
            target_real_label (float) - - label for a real image
            target_fake_label (float) - - label for a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == | |
frequencies[i:],
for other_frequency_index, other_row in enumerate(ons_or_offs[frequency_index + 1:, :], start=frequency_index + 1):
            # for each non-claimed 1 which is less than threshold_ms away in time,
upper_limit_index = top_level_frequency_one_index + threshold_samples
lower_limit_index = top_level_frequency_one_index - threshold_samples
other_ones = np.reshape(np.where(other_row == 1), (-1,)) # Get the indexes of all the 1s in row
tmp = np.reshape(np.where((other_ones >= lower_limit_index) # Get the indexes in the other_ones array of all items in bounds
& (other_ones <= upper_limit_index)), (-1,))
other_ones = other_ones[tmp] # Get the indexes of all the 1s in the row that are in bounds
if len(other_ones) > 0:
unclaimed_idx = other_ones[0] # Take the first one
claimed.append((other_frequency_index, unclaimed_idx))
elif len(claimed) < 3:
# revert the top-most 1 to 0
ons_or_offs[frequency_index, top_level_frequency_one_index] = 0
claimed = []
break # Break from the for-each-frequencies[i:] loop so we can move on to the next item in the top-most freq
elif len(claimed) >= 3:
found_a_front = True
# this group of so-far-claimed forms a front
claimed_as_indexes = tuple(np.array(claimed).T)
ons_or_offs[claimed_as_indexes] = this_id
this_id += 1
claimed = []
break # Move on to the next item in the top-most array
# If we never found a frequency that did not have a matching offset, handle that case here
if len(claimed) >= 3:
claimed_as_indexes = tuple(np.array(claimed).T)
ons_or_offs[claimed_as_indexes] = this_id
this_id += 1
claimed = []
elif found_a_front:
this_id += 1
else:
ons_or_offs[frequency_index, top_level_frequency_one_index] = 0
claimed = []
return ons_or_offs
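# Note (added for clarity): after this pass the returned ons_or_offs matrix holds
# 0 wherever no front was formed and a positive integer ID on each group of
# (roughly) three or more onsets/offsets that fall within the time threshold
# across neighbouring frequency channels; each ID labels one front.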
def _lookup_offset_by_onset_idx(onset_idx, onsets, offsets):
"""
Takes an onset index (freq, sample) and returns the offset index (freq, sample)
such that frequency index is the same, and sample index is the minimum of all
    offsets occurring after the given onset. If there are no offsets after the given
onset in that frequency channel, the final sample in that channel is returned.
"""
assert len(onset_idx) == 2, "Onset_idx must be a tuple of the form (freq_idx, sample_idx)"
frequency_idx, sample_idx = onset_idx
offset_sample_idxs = np.reshape(np.where(offsets[frequency_idx, :] == 1), (-1,))
# get the offsets which occur after onset
offset_sample_idxs = offset_sample_idxs[offset_sample_idxs > sample_idx]
if len(offset_sample_idxs) == 0:
# There is no offset in this frequency that occurs after the onset, just return the last sample
chosen_offset_sample_idx = offsets.shape[1] - 1
assert offsets[frequency_idx, chosen_offset_sample_idx] == 0
else:
# Return the closest offset to the onset
chosen_offset_sample_idx = offset_sample_idxs[0]
assert offsets[frequency_idx, chosen_offset_sample_idx] != 0
return frequency_idx, chosen_offset_sample_idx
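# Worked example (added, illustrative): if offsets[2, :] has 1s at samples 10
# and 40 and the onset is (2, 15), the function returns (2, 40) -- the first
# offset after the onset in that channel. If no later offset exists, it falls
# back to (2, offsets.shape[1] - 1).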
def _get_front_idxs_from_id(fronts, id):
"""
Return a list of tuples of the form (frequency_idx, sample_idx),
corresponding to all the indexes of the given front.
"""
if id == -1:
# This is the only special case.
# -1 is the index of the catch-all final column offset front.
freq_idxs = np.arange(fronts.shape[0], dtype=np.int64)
sample_idxs = np.ones(len(freq_idxs), dtype=np.int64) * (fronts.shape[1] - 1)
else:
freq_idxs, sample_idxs = np.where(fronts == id)
return [(f, i) for f, i in zip(freq_idxs, sample_idxs)]
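# Worked example (added, illustrative): if front ID 2 occupies cells (0, 1) and
# (1, 1) of a 3x4 `fronts` matrix, _get_front_idxs_from_id(fronts, 2) returns
# [(0, 1), (1, 1)], while _get_front_idxs_from_id(fronts, -1) returns
# [(0, 3), (1, 3), (2, 3)], i.e. the final column of every channel.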
def _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets):
"""
Returns a front ID which is the id of the offset front that contains the most overlap
with offsets that correspond to the given onset front ID.
"""
noverlaps = [] # will contain tuples of the form (number_overlapping, offset_front_id)
for offset_front_id in candidate_offset_front_ids:
offset_front_f_idxs, offset_front_s_idxs = np.where(offset_fronts == offset_front_id)
offset_front_idxs = [(f, i) for f, i in zip(offset_front_f_idxs, offset_front_s_idxs)]
        # count how many of this offset front's members coincide with the onsets' matched offsets
        noverlap_this_id = len(set(offset_front_idxs).intersection(set(offsets_corresponding_to_onsets)))
noverlaps.append((noverlap_this_id, offset_front_id))
_overlapped, chosen_offset_front_id = max(noverlaps, key=lambda t: t[0])
return int(chosen_offset_front_id)
def _get_offset_front_id_after_onset_sample_idx(onset_sample_idx, offset_fronts):
"""
Returns the offset_front_id which corresponds to the offset front which occurs
first entirely after the given onset sample_idx.
"""
# get all the offset_front_ids
offset_front_ids = [i for i in np.unique(offset_fronts) if i != 0]
best_id_so_far = -1
closest_offset_sample_idx = sys.maxsize
for offset_front_id in offset_front_ids:
# get all that offset front's indexes
offset_front_idxs = _get_front_idxs_from_id(offset_fronts, offset_front_id)
# get the sample indexes
offset_front_sample_idxs = [s for _f, s in offset_front_idxs]
# if each sample index is greater than onset_sample_idx, keep this offset front if it is the best one so far
min_sample_idx = min(offset_front_sample_idxs)
if min_sample_idx > onset_sample_idx and min_sample_idx < closest_offset_sample_idx:
closest_offset_sample_idx = min_sample_idx
best_id_so_far = offset_front_id
assert best_id_so_far > 1 or best_id_so_far == -1
return best_id_so_far
def _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts):
"""
Get the ID corresponding to the offset which occurs first after the given onset_front_id.
By `first` I mean the front which contains the offset which is closest to the latest point
    in the onset front. By `after`, I mean that the offset front must contain only offsets which
occur after the latest onset in the onset front.
If there is no appropriate offset front, the id returned is -1.
"""
# get the onset idxs for this front
onset_idxs = _get_front_idxs_from_id(onset_fronts, onset_front_id)
# get the sample idxs for this front
onset_sample_idxs = [s for _f, s in onset_idxs]
# get the latest onset in this onset front
latest_onset_in_front = max(onset_sample_idxs)
offset_front_id_after_this_onset_front = _get_offset_front_id_after_onset_sample_idx(latest_onset_in_front, offset_fronts)
return int(offset_front_id_after_this_onset_front)
def _match_offset_front_id_to_onset_front_id(onset_front_id, onset_fronts, offset_fronts, onsets, offsets):
"""
Find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the
given onset front.
The offset front which contains the most of such offsets is the match.
If there are no such offset fronts, return -1.
"""
# find all offset fronts which are composed of at least one offset which corresponds to one of the onsets in the onset front
# the offset front which contains the most of such offsets is the match
# get the onsets that make up front_id
onset_idxs = _get_front_idxs_from_id(onset_fronts, onset_front_id)
# get the offsets that match the onsets in front_id
offset_idxs = [_lookup_offset_by_onset_idx(i, onsets, offsets) for i in onset_idxs]
# get all offset_fronts which contain at least one of these offsets
candidate_offset_front_ids = set([int(offset_fronts[f, i]) for f, i in offset_idxs])
# It is possible that offset_idxs contains offset indexes that correspond to offsets that did not
# get formed into a front - those will have a front ID of 0. Remove them.
candidate_offset_front_ids = [id for id in candidate_offset_front_ids if id != 0]
if candidate_offset_front_ids:
chosen_offset_front_id = _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offset_idxs)
else:
chosen_offset_front_id = _get_offset_front_id_after_onset_front(onset_front_id, onset_fronts, offset_fronts)
return chosen_offset_front_id
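# Worked example (added, illustrative): if the onsets of onset front 3 map to
# offsets whose front IDs are [4, 4, 7, 0], the 0 (an offset that never joined
# a front) is dropped, the candidates are {4, 7}, and front 4 wins because it
# contains more of the matched offsets. If every ID were 0, the fallback picks
# the first offset front lying entirely after onset front 3.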
def _get_consecutive_portions_of_front(front):
"""
Yields lists of the form [(f, s), (f, s)], one at a time from the given front (which is a list of the same form),
such that each list yielded is consecutive in frequency.
"""
last_f = None
ls = []
for f, s in front:
if last_f is not None and f != last_f + 1:
yield ls
ls = []
ls.append((f, s))
last_f = f
yield ls
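# Worked example (added, illustrative): for front = [(0, 5), (1, 7), (3, 2), (4, 9)]
# the generator yields [(0, 5), (1, 7)] and then [(3, 2), (4, 9)], splitting
# wherever the frequency index is not exactly one more than the previous one.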
def _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id):
"""
Gets an onset_front and an offset_front such that they both occupy at least some of the same
frequency channels, then returns the portion of each that overlaps with the other.
"""
# Get the onset front of interest
onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
# Get the offset front of interest
offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id)
# Keep trying consecutive portions of this onset front until we find a consecutive portion
# that overlaps with part of the offset front
consecutive_portions_of_onset_front = [c for c in _get_consecutive_portions_of_front(onset_front)]
for consecutive_portion_of_onset_front in consecutive_portions_of_onset_front:
# Only get the segment of this front that overlaps in frequencies with the onset front of interest
onset_front_frequency_indexes = [f for f, _ in consecutive_portion_of_onset_front]
overlapping_offset_front = [(f, s) for f, s in offset_front if f in onset_front_frequency_indexes]
# Only get as much of this overlapping portion as is actually consecutive
for consecutive_portion_of_offset_front in _get_consecutive_portions_of_front(overlapping_offset_front):
if consecutive_portion_of_offset_front:
# Just return the first one we get - if we get any it means we found a portion of overlap
return consecutive_portion_of_onset_front, consecutive_portion_of_offset_front
return [], [] # These two fronts have no overlap
def _update_segmentation_mask(segmentation_mask, onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap):
"""
Returns an updated segmentation mask such that the input `segmentation_mask` has been updated by segmenting between
`onset_front_id` and `offset_front_id`, as found in `onset_fronts` and `offset_fronts`, respectively.
This function also returns the onset_fronts and offset_fronts matrices, updated so that any fronts that are of
| |
<filename>umbra/scenario/environment.py
import re
import os
import json
import yaml
import logging
import psutil
import subprocess
import time
from mininet.net import Containernet
from mininet.node import Controller, OVSKernelSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Link
from mininet import clean
import docker
logger = logging.getLogger(__name__)
setLogLevel("info")
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("docker").setLevel(logging.WARNING)
TRIGGER_DELAY = 2
class EnvironmentParser:
def __init__(self):
self.topology = None
self.deploy = {}
def get(self, what):
if what == "topology":
return self.topology
if what == "deploy":
return self.deploy
return None
def parse_nodes(self):
self.deploy["nodes"] = {}
self.deploy["switches"] = []
nodes = self.topology.get("nodes")
for _node_id, node in nodes.items():
node_type = node.get("type")
node_id = node.get("name")
if node_type == "container":
self.deploy["nodes"][node_id] = node
elif node_type == "switch":
self.deploy["switches"].append(node_id)
logger.info("Plugin nodes %s", self.deploy["nodes"].keys())
logger.info("Plugin switches %s", self.deploy["switches"])
def parse_links(self):
self.deploy["links"] = {}
links = self.topology.get("links")
for _link_id, link in links.items():
link_type = link.get("type")
link_src = link.get("src")
link_dst = link.get("dst")
if link_type == "internal" or link_type == "external":
link_id = link_src + "-" + link_dst
params_dst = link.get("params_dst")
params_src = link.get("params_src")
self.deploy["links"][link_id] = {
"type": link_type,
"src": link_src,
"dst": link_dst,
"params_src": params_src,
"params_dst": params_dst,
"resources": link.get("resources", {}),
}
else:
logger.info("unknown link type %s", link_type)
logger.info("Plugin links %s", self.deploy["links"].keys())
def build(self, topology):
logger.debug("Containernet plugin parsing topology")
logger.debug(f"{topology}")
self.topology = topology
self.parse_nodes()
self.parse_links()
return self.deploy
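# Illustrative topology accepted by EnvironmentParser.build (added for clarity;
# keys mirror the fields read in parse_nodes/parse_links, values are placeholders):
# topology = {
#     "nodes": {
#         "n1": {"type": "container", "name": "peer0", "image": "ubuntu:20.04",
#                "resources": {"cpus": 1, "cpu_bw": 1.0, "memory": 1024}},
#         "s1": {"type": "switch", "name": "s1"},
#     },
#     "links": {
#         "l1": {"type": "internal", "src": "peer0", "dst": "s1",
#                "params_src": {}, "params_dst": {}, "resources": {}},
#     },
# }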
class Environment:
def __init__(self, topo):
self.parser = EnvironmentParser()
self.topo = topo
self.net = None
self.nodes = {}
self.switches = {}
self.nodes_info = {}
self._docker_client = None
self._connected_to_docker = False
self._docker_network = None
logger.debug("Environment Instance Created")
logger.debug(f"{json.dumps(self.topo, indent=4)}")
def connect_docker(self):
try:
self._docker_client = docker.from_env()
except Exception as e:
self._docker_client = None
            logger.warning(
"could not connect to docker socket - check if docker is installed/running %s",
e,
)
else:
self._connected_to_docker = True
def create_docker_network(self, network_name="umbra"):
self.connect_docker()
if self._connected_to_docker:
try:
self._docker_network = self._docker_client.networks.create(
network_name, driver="bridge"
)
except docker.errors.APIError as e:
logger.debug(f"Docker network not created - API Error {e}")
return False
else:
logger.debug(f"Docker network - {network_name} - created")
return True
else:
logger.debug(f"Could not create docker network")
return False
def remove_docker_network(self, network_name="umbra"):
self.connect_docker()
if self._connected_to_docker:
try:
if not self._docker_network:
self._docker_network = self._docker_client.networks.get(
network_name
)
if self._docker_network:
self._docker_network.remove()
except docker.errors.APIError as e:
logger.debug(f"Docker network not removed - API Error {e}")
return False
else:
logger.debug(f"Docker network - {network_name} - removed")
return True
else:
logger.debug(f"Could not remove docker network")
return False
def prune_docker_containers(self):
self.connect_docker()
if self._connected_to_docker:
try:
pruned = self._docker_client.containers.prune()
logger.debug(f"Docker containers pruned {pruned}")
except docker.errors.APIError as e:
logger.debug(f"Docker containers not pruned - API Error {e}")
return False
else:
return True
else:
logger.debug(f"Could not prune docker containers")
return False
def prune_docker_volumes(self):
self.connect_docker()
if self._connected_to_docker:
try:
pruned = self._docker_client.volumes.prune()
logger.debug(f"Docker volumes pruned {pruned}")
except docker.errors.APIError as e:
logger.debug(f"Docker volumes not pruned - API Error {e}")
return False
else:
return True
else:
logger.debug(f"Could not prune docker volumes")
return False
def remove_docker_container(self, container_name):
self.connect_docker()
if self._connected_to_docker:
try:
container = self._docker_client.containers.get(container_name)
container.remove()
except docker.errors.APIError as e:
logger.debug(f"Docker container not removed - API Error {e}")
return False
else:
logger.debug(f"Docker container - {container_name} - removed")
return True
else:
logger.debug(f"Could not remove docker container")
return False
def remove_docker_container_chaincodes(self):
self.connect_docker()
chaincodes_removed = {}
if self._connected_to_docker:
try:
containers = self._docker_client.containers.list()
for container in containers:
container_name = container.name
if "dev-peer" in container_name:
ack = self.remove_docker_container(container_name)
chaincodes_removed[container_name] = ack
except docker.errors.APIError as e:
logger.debug(f"Docker container chaincodes not removed - API Error {e}")
return False
else:
logger.debug(
f"Docker container chaincodes removed {chaincodes_removed}"
)
ack_all = all(list(chaincodes_removed.values()))
return ack_all
else:
logger.debug(f"Could not remove docker container")
return False
def _create_network(self):
self.net = Containernet(controller=Controller, link=TCLink)
self.net.addController("c0")
logger.info("Created network: %r" % self.net)
def _add_container(self, node):
def calculate_cpu_cfs_values(cpu_resources):
vcpus = int(cpu_resources.get("cpus", 1))
cpu_bw = float(cpu_resources.get("cpu_bw", 1.0))
cpu_bw_p = 100000 * vcpus
cpu_bw_q = int(cpu_bw_p * cpu_bw)
return cpu_bw_p, cpu_bw_q
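        # Worked example (added for clarity): cpus=2, cpu_bw=0.5 yields
        # cpu_period=200000 and cpu_quota=100000 (microseconds), i.e.
        # quota = period * cpu_bw.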
resources = node.get("resources")
memory = resources.get("memory", 1024)
cpu_bw_p, cpu_bw_q = calculate_cpu_cfs_values(resources)
mng_ip = node.get("mng_intf", None)
logger.debug("Adding container: %s - %s", node.get("name"), node.get("image"))
container = self.net.addDocker(
node.get("name"),
dcmd=node.get("command", None),
dimage=node.get("image"),
ip=mng_ip,
volumes=node.get("volumes", []),
cpu_period=cpu_bw_p,
cpu_quota=cpu_bw_q,
cpuset_cpus="",
mem_limit=str(memory) + "m",
memswap_limit=0,
environment=node.get("env", None),
ports=node.get("ports", []),
port_bindings=node.get("port_bindings", {}),
working_dir=node.get("working_dir", None),
extra_hosts=node.get("extra_hosts", {}),
network_mode=node.get("network_mode", "none"),
)
logger.debug("Added container: %s", node.get("name"))
return container
def _add_nodes(self):
nodes = self.topo.get("nodes")
for node_id, node in nodes.items():
node_type = node.get("type")
if node_type == "container":
added_node = self._add_container(node)
self.nodes[node_id] = added_node
else:
logger.info("Node %s not added, unknown format %s", node_id, format)
def _add_switches(self):
switches = self.topo.get("switches")
logger.info("Adding switches %s", switches)
for sw_name in switches:
s = self.net.addSwitch(sw_name, cls=OVSKernelSwitch)
self.switches[sw_name] = s
logger.info("Switch added %s", s)
def _add_links(self):
links = self.topo.get("links")
for link_id, link in links.items():
link_type = link.get("type")
if link_type == "internal":
src = link.get("src")
dst = link.get("dst")
params_src = {}
params_dst = {}
intf_src = None
intf_dst = None
params_s = link.get("params_src", {})
params_d = link.get("params_dst", {})
link_resources = link.get("resources", {})
if params_s:
intf_src = params_s.get("id", None)
ip_src = params_s.get("ip", None)
if ip_src:
params_src["ip"] = str(ip_src)
if params_d:
intf_dst = params_d.get("id", None)
ip_dst = params_d.get("ip", None)
if ip_dst:
params_dst["ip"] = str(ip_dst)
src_node = (
self.nodes.get(src)
if src in self.nodes.keys()
else self.switches.get(src)
)
dst_node = (
self.nodes.get(dst)
if dst in self.nodes.keys()
else self.switches.get(dst)
)
logger.info(
"Link adding src %s - intf_src %s, dst %s, intf_dst %s, params_src %s, params_dst %s, resources %s",
src,
intf_src,
dst,
intf_dst,
params_src,
params_dst,
link_resources,
)
link_stats = self.net.addLink(
src_node,
dst_node,
intfName1=intf_src,
intfName2=intf_dst,
params1=params_src,
params2=params_dst,
cls=TCLink,
**link_resources,
)
logger.info("Link Status %s", link_stats)
else:
logger.info("Link %s not added, unknown type %s", link_id, link_type)
def _add_tun_links(self):
# https://costiser.ro/2016/07/07/overlay-tunneling-with-openvswitch-gre-vxlan-geneve-greoipsec/
# https://blog.scottlowe.org/2013/05/15/examining-open-vswitch-traffic-patterns/#scenario-3-the-isolated-bridge
# https://blog.scottlowe.org/2013/05/07/using-gre-tunnels-with-open-vswitch/
links = self.topo.get("links")
for link_id, link in links.items():
link_type = link.get("type")
if link_type == "external":
src = link.get("src")
dst = link.get("dst")
logger.info(f"Adding external link: src {src} - dst {dst}")
src_node = self.switches.get(src)
dst_node = self.switches.get(dst)
if src_node:
node = src_node
params = link.get("params_src", {})
logger.info(f"Node link: src {src} - params {params}")
elif dst_node:
node = dst_node
params = link.get("params_dst", {})
logger.info(f"Node link: dst {dst} - params {params}")
else:
logger.info(f"Node link not found: nor dst {dst} nor src {src}")
continue
intf_tun_name = params.get("tun_id", None)
tun_remote_ip = params.get("tun_remote_ip", None)
if intf_tun_name and tun_remote_ip:
cmd = f" add-port {node.deployed_name} {intf_tun_name} -- set Interface {intf_tun_name} type=gre options:remote_ip={tun_remote_ip}"
logger.info(f"Adding external link: {cmd}")
ack = node.vsctl(cmd)
logger.info(f"Link external {link_type} {link_id} added")
logger.info(f"Link external vsctl out: {ack}")
else:
logger.info(
f"Could not add external link: missing intf_tun_name {intf_tun_name} or tun_remote_ip {tun_remote_ip}"
)
def _start_network(self):
if self.net:
self.net.start()
logger.info("Started network: %r" % self.net)
def get_host_ip(self):
intf = "docker0"
intfs = psutil.net_if_addrs()
intf_info = intfs.get(intf, None)
if intf_info:
for address in intf_info:
                if address.family == 2:  # 2 == socket.AF_INET (IPv4)
host_address = address.address
return host_address
return None
def get_host_ips(self, host):
intf = "eth0"
config = host.cmd("ifconfig %s 2>/dev/null" % intf)
# logger.info("get host %s config ips %s", host, config)
if not config:
logger.info("Error: %s does not exist!\n", intf)
ips = re.findall(r"\d+\.\d+\.\d+\.\d+", config)
if ips:
# logger.info("host intf ips %s", ips)
ips_dict = {"ip": ips[0], "broadcast": ips[1], "mask": ips[2]}
return ips_dict
return None
def parse_info(self, elements, specie):
full_info = {}
if specie == "hosts":
full_info["hosts"] = {}
for host in elements:
info = {
"name": host.name,
"intfs": dict(
[(intf.name, port) for (intf, port) in host.ports.items()]
),
"host_ip": self.get_host_ips(self.nodes[host.name]).get("ip", None),
}
full_info["hosts"][host.name] = info
if specie == "switches":
full_info["switches"] = {}
for sw in elements:
info = {
"name": sw.name,
"dpid": sw.dpid,
"intfs": dict(
[(intf.name, port) for (intf, port) in sw.ports.items()]
),
}
full_info["switches"][sw.name] = info
if specie == "links":
full_info["links"] = {}
for link in elements:
link_name = str(link)
info = {
"name": link_name,
"src": link.intf1.node.name,
"dst": link.intf2.node.name,
"intf_isup": link.intf1.isUp() and link.intf2.isUp(),
"src-port": link.intf1.name,
"dst-port": link.intf2.name,
}
full_info["links"][link_name] = info
return full_info
def net_topo_info(self):
info = {}
info.update(self.parse_info(self.net.hosts, "hosts"))
info.update(self.parse_info(self.net.switches, "switches"))
info.update(self.parse_info(self.net.links, "links"))
logger.info("Topology info:")
logger.info("%s", info)
return info
def start(self):
self.topo = self.parser.build(self.topo)
self.create_docker_network()
self._create_network()
self._add_nodes()
self._add_switches()
self._add_links()
self._start_network()
self._add_tun_links()
logger.info("Experiment running")
self.nodes_info = self.parse_info(self.net.hosts, "hosts")
info = {
"hosts": self.nodes_info.get("hosts"),
"topology": self.net_topo_info(),
}
return True, info
def _stop_network(self):
if self.net:
self.net.stop()
logger.info("Stopped network: %r" % self.net)
def mn_cleanup(self):
clean.cleanup()
self.remove_docker_container_chaincodes()
self.remove_docker_network()
self.prune_docker_volumes()
def stop(self):
self._stop_network()
self.mn_cleanup()
self.nodes = {}
self.switches = {}
self.nodes_info = {}
self.net = None
return True, {}
def stats(self):
self.nodes_info = self.parse_info(self.net.hosts, "hosts")
info = {
"hosts": self.nodes_info.get("hosts"),
"topology": self.net_topo_info(),
}
return True, info
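# Illustrative driver (added; not part of the original module). `topology_dict`
# is a placeholder following the shape sketched above the Environment class.
# env = Environment(topology_dict)
# ok, info = env.start()   # parses the topology, deploys containers, switches and links
# ok, stats = env.stats()  # refreshes host and topology info
# env.stop()               # stops the network and cleans up mininet/docker leftovers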
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, <NAME>
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import functools
import os
import re
import tempfile
from anima.ui.progress_dialog import ProgressDialogManager
from anima.env.mayaEnv.camera_tools import cam_to_chan
from anima.utils import do_db_setup
import pymel.core as pm
import maya.mel as mel
import maya.cmds as cmds
from anima.env.mayaEnv import auxiliary, camera_tools
__last_commands__ = [] # list of dictionaries
__last_tab__ = 'ANIMA_TOOLBOX_LAST_TAB_INDEX'
def repeater(index):
"""repeats the last command with the given index
"""
global __last_commands__
try:
call_data = __last_commands__[index]
return call_data[0](*call_data[1], **call_data[2])
except IndexError:
return None
def repeat_last(call_data):
"""own implementation of pm.repeatLast
"""
global __last_commands__
index = len(__last_commands__)
callable_ = call_data[0]
args = call_data[1]
kwargs = call_data[2]
command = \
'print \\"\\";python(\\\"from anima.env.mayaEnv.toolbox import ' \
'repeater; repeater(%s);\\\");' % index
repeat_last_command = 'repeatLast -ac "%(command)s" -acl "%(label)s";' % {
'command': command,
'label': callable_.__name__
}
print(repeat_last_command)
pm.mel.eval(repeat_last_command)
__last_commands__.append(call_data)
# also call the callable
callable_(*args, **kwargs)
def RepeatedCallback(callable_, *args, **kwargs):
"""Adds the given callable to the last commands list and adds a caller to
the pm.repeatLast
"""
return pm.Callback(
repeat_last, [callable_, args, kwargs]
)
class Color(object):
"""a simple color class
"""
colors = [
(1.000, 0.500, 0.666),
(1.000, 0.833, 0.500),
(0.666, 1.000, 0.500),
(0.500, 1.000, 0.833),
(0.500, 0.666, 1.000),
(0.833, 0.500, 1.000)
]
def __init__(self, index=0):
self.index = index
self.max_colors = len(self.colors)
def change(self):
"""updates the index to the next one
"""
self.index = int((self.index + 1) % self.max_colors)
def reset(self):
"""resets the color index
"""
self.index = 0
@property
def color(self):
"""returns the current color values
"""
return self.colors[self.index]
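# Worked example (added, illustrative): Color() starts at colors[0]
# (1.000, 0.500, 0.666); each change() advances to the next of the six palette
# entries and wraps back to the first after the sixth call; reset() returns to index 0.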
def UI():
#window setup
width = 260
height = 650
row_spacing = 3
color = Color()
if pm.dockControl("toolbox_dockControl", q=True, ex=True):
pm.deleteUI("toolbox_dockControl")
if pm.window("toolbox_window", q=True, ex=True):
pm.deleteUI("toolbox_window", wnd=True)
toolbox_window = pm.window(
'toolbox_window',
wh=(width, height),
title="Anima ToolBox"
)
#the layout that holds the tabs
main_formLayout = pm.formLayout(
'main_formLayout', nd=100, parent=toolbox_window
)
main_tabLayout = pm.tabLayout(
'main_tabLayout', scr=True, cr=True, parent=main_formLayout
)
#attach the main_tabLayout to main_formLayout
pm.formLayout(
main_formLayout, edit=True,
attachForm=[
(main_tabLayout, "top", 0),
(main_tabLayout, "bottom", 0),
(main_tabLayout, "left", 0),
(main_tabLayout, "right", 0)
]
)
with main_tabLayout:
# ----- GENERAL ------
general_columnLayout = pm.columnLayout(
'general_columnLayout',
adj=True,
cal="center",
rs=row_spacing
)
with general_columnLayout:
color.change()
pm.button(
'selectionManager_button',
l="Selection Manager",
c=RepeatedCallback(General.selection_manager),
ann="Selection Manager",
bgc=color.color
)
color.change()
pm.button(
'removeColonFromNames_button',
l="remove colon(:) from node names",
c=RepeatedCallback(General.remove_colon_from_names),
ann="removes the colon (:) character from all "
"selected object names",
bgc=color.color
)
pm.button(
'removePastedFromNames_button',
l="remove \"pasted_\" from node names",
c=RepeatedCallback(General.remove_pasted),
ann="removes the \"passed__\" from all selected "
"object names",
bgc=color.color
)
color.change()
pm.button(
'togglePolyMeshes_button',
l="toggle polymesh visibility",
c=RepeatedCallback(General.toggle_poly_meshes),
ann="toggles the polymesh display in the active model "
"panel",
bgc=color.color
)
color.change()
pm.button(
'selectSetMembers_button',
l="select set members",
c=RepeatedCallback(General.select_set_members),
ann="selects the selected set members in correct "
"order",
bgc=color.color
)
color.change()
pm.button(
'delete_unused_intermediate_shapes_button',
l='Delete Unused Intermediate Shape Nodes',
c=RepeatedCallback(General.delete_unused_intermediate_shapes),
ann='Deletes unused (no connection) intermediate shape nodes',
bgc=color.color
)
color.change()
pm.button(
'export_transform_info_button',
l='Export Transform Info',
c=RepeatedCallback(General.export_transform_info),
ann='exports transform info',
bgc=color.color
)
pm.button(
'import_transform_info_button',
l='Import Transform Info',
c=RepeatedCallback(General.import_transform_info),
ann='imports transform info',
bgc=color.color
)
color.change()
pm.button(
'export_component_transform_info_button',
l='Export Component Transform Info',
c=RepeatedCallback(General.export_component_transform_info),
ann='exports component transform info',
bgc=color.color
)
pm.button(
'import_component_transform_info_button',
l='Import Component Transform Info',
c=RepeatedCallback(General.import_component_transform_info),
ann='imports component transform info',
bgc=color.color
)
color.change()
pm.button(
'generate_thumbnail_button',
l='Generate Thumbnail',
c=RepeatedCallback(General.generate_thumbnail),
ann='Generates thumbnail for current scene',
bgc=color.color
)
color.change()
pm.button(
'cleanup_light_cameras_button',
l='Cleanup Light Cameras',
c=RepeatedCallback(General.cleanup_light_cameras),
ann=General.cleanup_light_cameras.__doc__,
bgc=color.color
)
# ----- REFERENCE ------
reference_columnLayout = pm.columnLayout(
'reference_columnLayout',
adj=True, cal="center", rs=row_spacing)
with reference_columnLayout:
color.reset()
pm.text(l='===== Reference Tools =====')
pm.button(
'duplicate_selected_reference_button',
l='Duplicate Selected Reference',
c=RepeatedCallback(Reference.duplicate_selected_reference),
ann='Duplicates the selected reference',
bgc=color.color
)
color.change()
pm.button(
'get_selected_reference_path_button',
l='Get Selected Reference Path',
c=RepeatedCallback(Reference.get_selected_reference_path),
ann='Prints the selected reference full path',
bgc=color.color
)
pm.button(
'open_selected_reference_button',
l='Open Selected Reference in New Maya',
c=RepeatedCallback(Reference.open_reference_in_new_maya),
ann='Opens the selected reference in new Maya '
'instance',
bgc=color.color
)
color.change()
pm.button(
'publish_model_as_look_dev_button',
l='Model -> LookDev',
c=RepeatedCallback(Reference.publish_model_as_look_dev),
ann='References the current Model scene to the LookDev scene '
'of the same task, creates the LookDev scene if '
'necessary, also reopens the current model scene.',
bgc=color.color
)
color.change()
pm.button(
'fix_reference_namespace_button',
l='Fix Reference Namespace',
c=RepeatedCallback(Reference.fix_reference_namespace),
ann='Fixes old style reference namespaces with new one, '
'creates new versions if necessary.',
bgc=color.color
)
color.change()
pm.button(
'fix_reference_paths_button',
l='Fix Reference Paths',
c=RepeatedCallback(Reference.fix_reference_paths),
                ann='Fixes reference paths deeply, so they will use '
'$REPO env var.',
bgc=color.color
)
pm.button(
'archive_button',
l='Archive Current Scene',
c=RepeatedCallback(Reference.archive_current_scene),
                ann='Creates a ZIP file containing the current scene and its '
'references in a flat Maya default project folder '
'structure',
bgc=color.color
)
pm.button(
'bind_to_original_button',
l='Bind To Original',
c=RepeatedCallback(Reference.bind_to_original),
ann='Binds the current local references to the ones on the '
'repository',
bgc=color.color
)
pm.button(
'unload_unselected_references_button',
l='Unload UnSelected References',
c=RepeatedCallback(Reference.unload_unselected_references),
ann='Unloads any references that is not related with the '
'selected objects',
bgc=color.color
)
color.change()
pm.text(l='===== Representation Tools =====')
with pm.rowLayout(nc=2, adj=1):
pm.checkBoxGrp(
'generate_repr_types_checkbox_grp',
label='Reprs',
numberOfCheckBoxes=3,
labelArray3=['GPU', 'ASS', 'RS'],
cl4=['left', 'left', 'left', 'left'],
cw4=[51, 50, 50, 50],
valueArray3=[1, 1, 1]
)
pm.checkBox(
'generate_repr_skip_existing_checkBox',
label='Skip existing Reprs.',
value=0
)
pm.button(
'generate_repr_of_all_references_button',
l='Deep Generate Repr Of All References',
c=RepeatedCallback(
Reference.generate_repr_of_all_references_caller
),
ann='Deeply generates desired Representations of all '
'references of this scene',
bgc=color.color
)
pm.button(
'generate_repr_of_scene_button',
l='Generate Repr Of This Scene',
c=RepeatedCallback(Reference.generate_repr_of_scene_caller),
ann='Generates desired Representations of this scene',
bgc=color.color
)
color.change()
with pm.rowLayout(nc=2, adj=1):
pm.radioButtonGrp(
'repr_apply_to_radio_button_grp',
label='Apply To',
# ad3=1,
labelArray2=['Selected', 'All References'],
numberOfRadioButtons=2,
cl3=['left', 'left', 'left'],
cw3=[50, 65, 65],
sl=1
)
pm.button(
'to_base_button',
l='To Base',
c=RepeatedCallback(Reference.to_base),
ann='Convert selected to Base representation',
bgc=color.color
)
pm.button(
'to_gpu_button',
l='To GPU',
c=RepeatedCallback(Reference.to_gpu),
ann='Convert selected to GPU representation',
bgc=color.color
)
pm.button(
'to_ass_button',
l='To ASS',
c=RepeatedCallback(Reference.to_ass),
ann='Convert selected to ASS representation',
bgc=color.color
)
# ----- MODELING ------
modeling_columnLayout = pm.columnLayout(
'modeling_columnLayout',
adj=True, cal="center", rs=row_spacing)
with modeling_columnLayout:
color.reset()
pm.button('toggleFaceNormalDisplay_button',
l="toggle face normal display",
c=RepeatedCallback(
pm.runtime.ToggleFaceNormalDisplay),
ann="toggles face normal display",
bgc=color.color)
pm.button('reverseNormals_button', l="reverse normals",
c=RepeatedCallback(Modeling.reverse_normals),
ann="reverse normals",
bgc=color.color)
pm.button('fixNormals_button', l="fix normals",
c=RepeatedCallback(Modeling.fix_normals),
ann="applies setToFace then conform and then "
"soften edge to all selected objects",
bgc=color.color)
color.change()
pm.button(
'oyHierarchyInstancer_button',
l="hierarchy_instancer on selected",
c=RepeatedCallback(Modeling.hierarchy_instancer),
ann="hierarchy_instancer on selected",
bgc=color.color
)
color.change()
pm.button(
'oyRelaxVerts_button',
l="relax_vertices",
c=RepeatedCallback(Modeling.relax_vertices),
ann="opens relax_vertices",
bgc=color.color
)
color.change()
pm.button(
'create_curve_from_mesh_edges_button',
l="Curve From Mesh Edges",
c=RepeatedCallback(Modeling.create_curve_from_mesh_edges),
ann="Creates a curve from selected mesh edges",
bgc=color.color
)
color.change()
pm.button(
'vertex_aligned_locator_button',
l="Vertex Aligned Locator",
c=RepeatedCallback(Modeling.vertex_aligned_locator),
ann="Creates an aligned locator from selected vertices",
bgc=color.color
)
color.change()
pm.button(
'select_zero_uv_area_faces_button',
l="Filter Zero UV Area Faces",
c=RepeatedCallback(Modeling.select_zero_uv_area_faces),
ann="Selects faces with zero uv area",
bgc=color.color
)
color.change()
with pm.rowLayout(nc=8, rat=(1, "both", 0), adj=1):
pm.text('set_pivot_text', l='Set Pivot', bgc=color.color)
pm.button(
'center_button',
l="C",
c=RepeatedCallback(
Modeling.set_pivot,
0
),
bgc=(0.8, 0.8, 0.8)
)
pm.button(
'minus_X_button',
l="-X",
c=RepeatedCallback(
Modeling.set_pivot,
1
),
bgc=(1.000, 0.500, 0.666)
)
pm.button(
'plus_X_button',
l="+X",
c=RepeatedCallback(
Modeling.set_pivot,
2
),
bgc=(1.000, 0.500, 0.666)
)
pm.button(
'minus_Y_button',
l="-Y",
c=RepeatedCallback(
Modeling.set_pivot,
3
),
bgc=(0.666, 1.000, 0.500)
)
pm.button(
'plus_Y_button',
l="+Y",
c=RepeatedCallback(
Modeling.set_pivot,
4
),
bgc=(0.666, 1.000, 0.500)
)
pm.button(
'minus_Z_button',
l="-X",
c=RepeatedCallback(
Modeling.set_pivot,
5
),
bgc=(0.500, 0.666, 1.000)
)
pm.button(
'plus_Z_button',
l="+X",
c=RepeatedCallback(
Modeling.set_pivot,
6
),
bgc=(0.500, 0.666, 1.000)
)
color.change()
with pm.rowLayout(nc=7, rat=(1, "both", 0), adj=1):
pm.text(l='Text. Res', bgc=color.color)
pm.button(
l="128",
c=RepeatedCallback(
Modeling.set_texture_res,
128
),
bgc=Color.colors[0]
)
pm.button(
l="256",
c=RepeatedCallback(
Modeling.set_texture_res,
256
),
bgc=Color.colors[1]
)
pm.button(
l="512",
c=RepeatedCallback(
Modeling.set_texture_res,
512
),
bgc=Color.colors[2]
)
pm.button(
l="1024",
c=RepeatedCallback(
Modeling.set_texture_res,
1024
),
bgc=Color.colors[3]
)
pm.button(
l='2048',
c=RepeatedCallback(
Modeling.set_texture_res,
2048
),
bgc=Color.colors[4]
)
pm.button(
l='4096',
c=RepeatedCallback(
Modeling.set_texture_res,
4096
),
bgc=Color.colors[5]
)
# ----- RIGGING ------
rigging_columnLayout = pm.columnLayout(
'rigging_columnLayout',
adj=True, cal="center",
rs=row_spacing
)
with rigging_columnLayout:
color.reset()
pm.button(
'oyCreateJointOnCurve_button',
l="oyCreateJointOnCurve",
c=RepeatedCallback(mel.eval, 'oyCreateJointOnCurve'),
ann="opens oyCreateJointOnCurve",
bgc=color.color
)
pm.button(
'oyIKFKSetup_button',
l="oyIKFKSetup",
c=RepeatedCallback(mel.eval, 'oyIKFKSetup'),
ann="opens oyIKFKSetup",
bgc=color.color
)
pm.button(
'oySpineSetupSetup_button',
l="oySpineSetupSetup",
c=RepeatedCallback(mel.eval, 'oyStretchySpineSetup'),
ann="opens oySpineSetupSetup",
bgc=color.color
)
pm.button(
'setupStretchySplineIKCurve_button',
l="setup stretchy splineIK curve",
c=RepeatedCallback(Rigging.setup_stretchy_spline_IKCurve),
ann="connects necessary nodes to calculate arcLength "
"change in percent",
bgc=color.color
)
pm.button(
'selectJointsDeformingTheObject_button',
l="select joints deforming the object",
c=RepeatedCallback(Rigging.select_joints_deforming_object),
ann="select joints that deform the object",
bgc=color.color
)
color.change()
pm.button(
'oyCreateAxialCorrectionGroup_button',
l="create axialCorrectionGroups",
c=RepeatedCallback(Rigging.axial_correction_group),
ann="creates a group node above the selected objects "
"to zero-out the transformations",
bgc=color.color
)
pm.button(
'createAxialCorrectionGroupForClusters_button',
l="create axialCorrectionGroup for clusters",
c=RepeatedCallback(
Rigging.create_axial_correction_group_for_clusters
),
ann="create Axial Correction Group For Clusters",
bgc=color.color
)
color.change()
pm.button(
'setClustersToAbsolute_button',
l="set selected clusters to absolute",
c=RepeatedCallback(Rigging.set_clusters_relative_state, 0),
ann="set Clusters to Absolute",
bgc=color.color
)
pm.button(
'setClustersToRelative_button',
l="set selected clusters to relative",
c=RepeatedCallback(
Rigging.set_clusters_relative_state, 1
),
ann="set Clusters to Relative",
bgc=color.color
)
color.change()
pm.button(
'addControllerShape_button',
l="add controller | |
'zipcode': 215000},
2072: {'name': '昆山市', 'pid': 166, 'zipcode': 215000},
2074: {'name': '相城区', 'pid': 166, 'zipcode': 215000},
2075: {'name': '苏州工业园区', 'pid': 166, 'zipcode': 215000},
2076: {'name': '虎丘区', 'pid': 166, 'zipcode': 215000},
2078: {'name': '启东市', 'pid': 167, 'zipcode': 226000},
2079: {'name': '如东县', 'pid': 167, 'zipcode': 226000},
2080: {'name': '如皋市', 'pid': 167, 'zipcode': 226000},
2081: {'name': '崇川区', 'pid': 167, 'zipcode': 226000},
2082: {'name': '海安县', 'pid': 167, 'zipcode': 226000},
2083: {'name': '海门市', 'pid': 167, 'zipcode': 226000},
2084: {'name': '港闸区', 'pid': 167, 'zipcode': 226000},
2085: {'name': '通州区', 'pid': 167, 'zipcode': 226000},
2086: {'name': '东海县', 'pid': 168, 'zipcode': 222000},
2087: {'name': '新浦区', 'pid': 168, 'zipcode': 222000},
2088: {'name': '海州区', 'pid': 168, 'zipcode': 222000},
2089: {'name': '灌云县', 'pid': 168, 'zipcode': 222000},
2090: {'name': '灌南县', 'pid': 168, 'zipcode': 222000},
2091: {'name': '赣榆区', 'pid': 168, 'zipcode': 222000},
2092: {'name': '连云区', 'pid': 168, 'zipcode': 222000},
2093: {'name': '楚州区', 'pid': 169, 'zipcode': 223001},
2094: {'name': '洪泽区', 'pid': 169, 'zipcode': 223001},
2095: {'name': '涟水县', 'pid': 169, 'zipcode': 223001},
2096: {'name': '淮阴区', 'pid': 169, 'zipcode': 223001},
2097: {'name': '清河区', 'pid': 169, 'zipcode': 223001},
2098: {'name': '清浦区', 'pid': 169, 'zipcode': 223001},
2099: {'name': '盱眙县', 'pid': 169, 'zipcode': 223001},
2100: {'name': '金湖县', 'pid': 169, 'zipcode': 223001},
2101: {'name': '东台市', 'pid': 170, 'zipcode': 224000},
2102: {'name': '亭湖区', 'pid': 170, 'zipcode': 224000},
2103: {'name': '响水县', 'pid': 170, 'zipcode': 224000},
2104: {'name': '大丰区', 'pid': 170, 'zipcode': 224000},
2105: {'name': '射阳县', 'pid': 170, 'zipcode': 224000},
2106: {'name': '建湖县', 'pid': 170, 'zipcode': 224000},
2107: {'name': '滨海县', 'pid': 170, 'zipcode': 224000},
2108: {'name': '盐都区', 'pid': 170, 'zipcode': 224000},
2109: {'name': '阜宁县', 'pid': 170, 'zipcode': 224000},
2110: {'name': '仪征市', 'pid': 171, 'zipcode': 225000},
2111: {'name': '宝应县', 'pid': 171, 'zipcode': 225000},
2112: {'name': '广陵区', 'pid': 171, 'zipcode': 225000},
2113: {'name': '江都区', 'pid': 171, 'zipcode': 225000},
2114: {'name': '维扬区', 'pid': 171, 'zipcode': 225000},
2115: {'name': '邗江区', 'pid': 171, 'zipcode': 225000},
2116: {'name': '高邮市', 'pid': 171, 'zipcode': 225000},
2117: {'name': '丹徒区', 'pid': 172, 'zipcode': 212000},
2118: {'name': '丹阳市', 'pid': 172, 'zipcode': 212000},
2119: {'name': '京口区', 'pid': 172, 'zipcode': 212000},
2120: {'name': '句容市', 'pid': 172, 'zipcode': 212000},
2121: {'name': '扬中市', 'pid': 172, 'zipcode': 212000},
2122: {'name': '润州区', 'pid': 172, 'zipcode': 212000},
2123: {'name': '兴化市', 'pid': 173, 'zipcode': 225300},
2124: {'name': '姜堰区', 'pid': 173, 'zipcode': 225300},
2125: {'name': '泰兴市', 'pid': 173, 'zipcode': 225300},
2126: {'name': '海陵区', 'pid': 173, 'zipcode': 225300},
2127: {'name': '靖江市', 'pid': 173, 'zipcode': 225300},
2128: {'name': '高港区', 'pid': 173, 'zipcode': 225300},
2129: {'name': '宿城区', 'pid': 174, 'zipcode': 223800},
2130: {'name': '宿豫区', 'pid': 174, 'zipcode': 223800},
2131: {'name': '沭阳县', 'pid': 174, 'zipcode': 223800},
2132: {'name': '泗洪县', 'pid': 174, 'zipcode': 223800},
2133: {'name': '泗阳县', 'pid': 174, 'zipcode': 223800},
2134: {'name': '上城区', 'pid': 175, 'zipcode': 310000},
2135: {'name': '下城区', 'pid': 175, 'zipcode': 310000},
2136: {'name': '临安区', 'pid': 175, 'zipcode': 310000},
2137: {'name': '余杭区', 'pid': 175, 'zipcode': 310000},
2138: {'name': '富阳区', 'pid': 175, 'zipcode': 310000},
2139: {'name': '建德市', 'pid': 175, 'zipcode': 310000},
2140: {'name': '拱墅区', 'pid': 175, 'zipcode': 310000},
2141: {'name': '桐庐县', 'pid': 175, 'zipcode': 310000},
2142: {'name': '江干区', 'pid': 175, 'zipcode': 310000},
2143: {'name': '淳安县', 'pid': 175, 'zipcode': 310000},
2144: {'name': '滨江区', 'pid': 175, 'zipcode': 310000},
2145: {'name': '萧山区', 'pid': 175, 'zipcode': 310000},
2146: {'name': '西湖区', 'pid': 175, 'zipcode': 310000},
2147: {'name': '余姚市', 'pid': 176, 'zipcode': 315000},
2148: {'name': '北仑区', 'pid': 176, 'zipcode': 315000},
2149: {'name': '奉化区', 'pid': 176, 'zipcode': 315000},
2150: {'name': '宁海县', 'pid': 176, 'zipcode': 315000},
2151: {'name': '慈溪市', 'pid': 176, 'zipcode': 315000},
2152: {'name': '江东区', 'pid': 176, 'zipcode': 315000},
2153: {'name': '江北区', 'pid': 176, 'zipcode': 315000},
2154: {'name': '海曙区', 'pid': 176, 'zipcode': 315000},
2155: {'name': '象山县', 'pid': 176, 'zipcode': 315000},
2156: {'name': '鄞州区', 'pid': 176, 'zipcode': 315000},
2157: {'name': '镇海区', 'pid': 176, 'zipcode': 315000},
2158: {'name': '乐清市', 'pid': 177, 'zipcode': 325000},
2159: {'name': '平阳县', 'pid': 177, 'zipcode': 325000},
2160: {'name': '文成县', 'pid': 177, 'zipcode': 325000},
2161: {'name': '永嘉县', 'pid': 177, 'zipcode': 325000},
2162: {'name': '泰顺县', 'pid': 177, 'zipcode': 325000},
2163: {'name': '洞头区', 'pid': 177, 'zipcode': 325000},
2164: {'name': '瑞安市', 'pid': 177, 'zipcode': 325000},
2165: {'name': '瓯海区', 'pid': 177, 'zipcode': 325000},
2166: {'name': '苍南县', 'pid': 177, 'zipcode': 325000},
2167: {'name': '鹿城区', 'pid': 177, 'zipcode': 325000},
2168: {'name': '龙湾区', 'pid': 177, 'zipcode': 325000},
2169: {'name': '南湖区', 'pid': 178, 'zipcode': 314000},
2170: {'name': '嘉善县', 'pid': 178, 'zipcode': 314000},
2171: {'name': '平湖市', 'pid': 178, 'zipcode': 314000},
2172: {'name': '桐乡市', 'pid': 178, 'zipcode': 314000},
2173: {'name': '海宁市', 'pid': 178, 'zipcode': 314000},
2174: {'name': '海盐县', 'pid': 178, 'zipcode': 314000},
2175: {'name': '秀洲区', 'pid': 178, 'zipcode': 314000},
2176: {'name': '南浔区', 'pid': 179, 'zipcode': 313000},
2177: {'name': '吴兴区', 'pid': 179, 'zipcode': 313000},
2178: {'name': '安吉县', 'pid': 179, 'zipcode': 313000},
2179: {'name': '德清县', 'pid': 179, 'zipcode': 313000},
2180: {'name': '长兴县', 'pid': 179, 'zipcode': 313000},
2181: {'name': '上虞区', 'pid': 180, 'zipcode': 312000},
2182: {'name': '嵊州市', 'pid': 180, 'zipcode': 312000},
2183: {'name': '新昌县', 'pid': 180, 'zipcode': 312000},
2184: {'name': '绍兴县', 'pid': 180, 'zipcode': 312000},
2185: {'name': '诸暨市', 'pid': 180, 'zipcode': 312000},
2186: {'name': '越城区', 'pid': 180, 'zipcode': 312000},
2187: {'name': '定海区', 'pid': 181, 'zipcode': 316000},
2188: {'name': '岱山县', 'pid': 181, 'zipcode': 316000},
2189: {'name': '嵊泗县', 'pid': 181, 'zipcode': 316000},
2190: {'name': '普陀区', 'pid': 181, 'zipcode': 316000},
2191: {'name': '常山县', 'pid': 182, 'zipcode': 324000},
2192: {'name': '开化县', 'pid': 182, 'zipcode': 324000},
2193: {'name': '柯城区', 'pid': 182, 'zipcode': 324000},
2194: {'name': '江山市', 'pid': 182, 'zipcode': 324000},
2195: {'name': '衢江区', 'pid': 182, 'zipcode': 324000},
2196: {'name': '龙游县', 'pid': 182, 'zipcode': 324000},
2197: {'name': '东阳市', 'pid': 183, 'zipcode': 321000},
2198: {'name': '义乌市', 'pid': 183, 'zipcode': 321000},
2199: {'name': '兰溪市', 'pid': 183, 'zipcode': 321000},
2200: {'name': '婺城区', 'pid': 183, 'zipcode': 321000},
2201: {'name': '武义县', 'pid': 183, 'zipcode': 321000},
2202: {'name': '永康市', 'pid': 183, 'zipcode': 321000},
2203: {'name': '浦江县', 'pid': 183, 'zipcode': 321000},
2204: {'name': '磐安县', 'pid': 183, 'zipcode': 321000},
2205: {'name': '金东区', 'pid': 183, 'zipcode': 321000},
2206: {'name': '三门县', 'pid': 184, 'zipcode': 318000},
2207: {'name': '临海市', 'pid': 184, 'zipcode': 318000},
2208: {'name': '仙居县', 'pid': 184, 'zipcode': 318000},
2209: {'name': '天台县', 'pid': 184, 'zipcode': 318000},
2210: {'name': '椒江区', 'pid': 184, 'zipcode': 318000},
2211: {'name': '温岭市', 'pid': 184, 'zipcode': 318000},
2212: {'name': '玉环市', 'pid': 184, 'zipcode': 318000},
2213: {'name': '路桥区', 'pid': 184, 'zipcode': 318000},
2214: {'name': '黄岩区', 'pid': 184, 'zipcode': 318000},
2215: {'name': '云和县', 'pid': 185, 'zipcode': 323000},
2216: {'name': '庆元县', 'pid': 185, 'zipcode': 323000},
2217: {'name': '景宁畲族自治县', 'pid': 185, 'zipcode': 323000},
2218: {'name': '松阳县', 'pid': 185, 'zipcode': 323000},
2219: {'name': '缙云县', 'pid': 185, 'zipcode': 323000},
2220: {'name': '莲都区', 'pid': 185, 'zipcode': 323000},
2221: {'name': '遂昌县', 'pid': 185, 'zipcode': 323000},
2222: {'name': '青田县', 'pid': 185, 'zipcode': 323000},
2223: {'name': '龙泉市', 'pid': 185, 'zipcode': 323000},
2224: {'name': '包河区', 'pid': 186, 'zipcode': 230000},
2225: {'name': '庐阳区', 'pid': 186, 'zipcode': 230000},
2226: {'name': '瑶海区', 'pid': 186, 'zipcode': 230000},
2227: {'name': '肥东县', 'pid': 186, 'zipcode': 230000},
2228: {'name': '肥西县', 'pid': 186, 'zipcode': 230000},
2229: {'name': '蜀山区', 'pid': 186, 'zipcode': 230000},
2230: {'name': '长丰县', 'pid': 186, 'zipcode': 230000},
2231: {'name': '三山区', 'pid': 187, 'zipcode': 241100},
2232: {'name': '南陵县', 'pid': 187, 'zipcode': 241100},
2233: {'name': '弋江区', 'pid': 187, 'zipcode': 241100},
2234: {'name': '繁昌县', 'pid': 187, 'zipcode': 241100},
2235: {'name': '芜湖县', 'pid': 187, 'zipcode': 241100},
2236: {'name': '镜湖区', 'pid': 187, 'zipcode': 241100},
2237: {'name': '鸠江区', 'pid': 187, 'zipcode': 241100},
2238: {'name': '五河县', 'pid': 188, 'zipcode': 233000},
2239: {'name': '固镇县', 'pid': 188, 'zipcode': 233000},
2240: {'name': '怀远县', 'pid': 188, 'zipcode': 233000},
2241: {'name': '淮上区', 'pid': 188, 'zipcode': 233000},
2242: {'name': '禹会区', 'pid': 188, 'zipcode': 233000},
2243: {'name': '蚌山区', 'pid': 188, 'zipcode': 233000},
2244: {'name': '龙子湖区', 'pid': 188, 'zipcode': 233000},
2245: {'name': '八公山区', 'pid': 189, 'zipcode': 232000},
2246: {'name': '凤台县', 'pid': 189, 'zipcode': 232000},
2247: {'name': '大通区', 'pid': 189, 'zipcode': 810100},
2248: {'name': '潘集区', 'pid': 189, 'zipcode': 232000},
2249: {'name': '田家庵区', 'pid': 189, 'zipcode': 232000},
2250: {'name': '谢家集区', 'pid': 189, 'zipcode': 232000},
2251: {'name': '当涂县', 'pid': 190, 'zipcode': 243000},
2252: {'name': '花山区', 'pid': 190, 'zipcode': 243000},
2253: {'name': '金家庄区', 'pid': 190, 'zipcode': 243000},
2254: {'name': '雨山区', 'pid': 190, 'zipcode': 243000},
2255: {'name': '杜集区', 'pid': 191, 'zipcode': 235000},
2256: {'name': '濉溪县', 'pid': 191, 'zipcode': 235000},
2257: {'name': '烈山区', 'pid': 191, 'zipcode': 235000},
2258: {'name': '相山区', 'pid': 191, 'zipcode': 235000},
2259: {'name': '狮子山区', 'pid': 192, 'zipcode': 244100},
2260: {'name': '郊区', 'pid': 192, 'zipcode': 244100},
2261: {'name': '铜官区', 'pid': 192, 'zipcode': 244100},
2262: {'name': '铜陵县', 'pid': 192, 'zipcode': 244100},
2263: {'name': '大观区', 'pid': 193, 'zipcode': 246000},
2264: {'name': '太湖县', 'pid': 193, 'zipcode': 246000},
2265: {'name': '宜秀区', 'pid': 193, 'zipcode': 246000},
2266: {'name': '宿松县', 'pid': 193, 'zipcode': 246000},
2267: {'name': '岳西县', 'pid': 193, 'zipcode': 246000},
2268: {'name': '怀宁县', 'pid': 193, 'zipcode': 246000},
2269: {'name': '望江县', 'pid': 193, 'zipcode': 246000},
2270: {'name': '枞阳县', 'pid': 193, 'zipcode': 246000},
2271: {'name': '桐城市', 'pid': 193, 'zipcode': 246000},
2272: {'name': '潜山县', 'pid': 193, 'zipcode': 246000},
2273: {'name': '迎江区', 'pid': 193, 'zipcode': 246000},
2274: {'name': '休宁县', 'pid': 194, 'zipcode': 245000},
2275: {'name': '屯溪区', 'pid': 194, 'zipcode': 245000},
2276: {'name': '徽州区', 'pid': 194, 'zipcode': 245000},
2277: {'name': '歙县', 'pid': 194, 'zipcode': 245000},
2278: {'name': '祁门县', 'pid': 194, 'zipcode': 245000},
2279: {'name': '黄山区', 'pid': 194, 'zipcode': 245000},
2280: {'name': '黟县', 'pid': 194, 'zipcode': 245000},
2281: {'name': '全椒县', 'pid': 195, 'zipcode': 239000},
2282: {'name': '凤阳县', 'pid': 195, 'zipcode': 239000},
2283: {'name': '南谯区', 'pid': 195, 'zipcode': 239000},
2284: {'name': '天长市', 'pid': 195, 'zipcode': 239000},
2285: {'name': '定远县', 'pid': 195, 'zipcode': 239000},
2286: {'name': '明光市', 'pid': 195, 'zipcode': 239000},
2287: | |
<filename>rumblesdk/models/scan_options.py
# coding: utf-8
"""
Rumble API
Rumble Network Discovery API # noqa: E501
OpenAPI spec version: 2.11.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ScanOptions(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'targets': 'str',
'excludes': 'str',
'scan_name': 'str',
'scan_description': 'str',
'scan_frequency': 'str',
'scan_start': 'str',
'scan_tags': 'str',
'scan_grace_period': 'str',
'agent': 'str',
'rate': 'str',
'max_host_rate': 'str',
'passes': 'str',
'max_attempts': 'str',
'max_sockets': 'str',
'max_group_size': 'str',
'max_ttl': 'str',
'tcp_ports': 'str',
'tcp_excludes': 'str',
'screenshots': 'str',
'nameservers': 'str',
'subnet_ping': 'str',
'subnet_ping_net_size': 'str',
'subnet_ping_sample_rate': 'str',
'host_ping': 'str',
'probes': 'str'
}
attribute_map = {
'targets': 'targets',
'excludes': 'excludes',
'scan_name': 'scan-name',
'scan_description': 'scan-description',
'scan_frequency': 'scan-frequency',
'scan_start': 'scan-start',
'scan_tags': 'scan-tags',
'scan_grace_period': 'scan-grace-period',
'agent': 'agent',
'rate': 'rate',
'max_host_rate': 'max-host-rate',
'passes': 'passes',
'max_attempts': 'max-attempts',
'max_sockets': 'max-sockets',
'max_group_size': 'max-group-size',
'max_ttl': 'max-ttl',
'tcp_ports': 'tcp-ports',
'tcp_excludes': 'tcp-excludes',
'screenshots': 'screenshots',
'nameservers': 'nameservers',
'subnet_ping': 'subnet-ping',
'subnet_ping_net_size': 'subnet-ping-net-size',
'subnet_ping_sample_rate': 'subnet-ping-sample-rate',
'host_ping': 'host-ping',
'probes': 'probes'
}
def __init__(self, targets:str=None, excludes:str=None, scan_name:str=None, scan_description:str=None, scan_frequency:str=None, scan_start:str=None, scan_tags:str=None, scan_grace_period:str=None, agent:str=None, rate:str=None, max_host_rate:str=None, passes:str=None, max_attempts:str=None, max_sockets:str=None, max_group_size:str=None, max_ttl:str=None, tcp_ports:str=None, tcp_excludes:str=None, screenshots:str=None, nameservers:str=None, subnet_ping:str=None, subnet_ping_net_size:str=None, subnet_ping_sample_rate:str=None, host_ping:str=None, probes:str=None): # noqa: E501
"""ScanOptions - a model defined in Swagger""" # noqa: E501
self._targets = None
self._excludes = None
self._scan_name = None
self._scan_description = None
self._scan_frequency = None
self._scan_start = None
self._scan_tags = None
self._scan_grace_period = None
self._agent = None
self._rate = None
self._max_host_rate = None
self._passes = None
self._max_attempts = None
self._max_sockets = None
self._max_group_size = None
self._max_ttl = None
self._tcp_ports = None
self._tcp_excludes = None
self._screenshots = None
self._nameservers = None
self._subnet_ping = None
self._subnet_ping_net_size = None
self._subnet_ping_sample_rate = None
self._host_ping = None
self._probes = None
self.discriminator = None
self.targets = targets
if excludes is not None:
self.excludes = excludes
if scan_name is not None:
self.scan_name = scan_name
if scan_description is not None:
self.scan_description = scan_description
if scan_frequency is not None:
self.scan_frequency = scan_frequency
if scan_start is not None:
self.scan_start = scan_start
if scan_tags is not None:
self.scan_tags = scan_tags
if scan_grace_period is not None:
self.scan_grace_period = scan_grace_period
if agent is not None:
self.agent = agent
if rate is not None:
self.rate = rate
if max_host_rate is not None:
self.max_host_rate = max_host_rate
if passes is not None:
self.passes = passes
if max_attempts is not None:
self.max_attempts = max_attempts
if max_sockets is not None:
self.max_sockets = max_sockets
if max_group_size is not None:
self.max_group_size = max_group_size
if max_ttl is not None:
self.max_ttl = max_ttl
if tcp_ports is not None:
self.tcp_ports = tcp_ports
if tcp_excludes is not None:
self.tcp_excludes = tcp_excludes
if screenshots is not None:
self.screenshots = screenshots
if nameservers is not None:
self.nameservers = nameservers
if subnet_ping is not None:
self.subnet_ping = subnet_ping
if subnet_ping_net_size is not None:
self.subnet_ping_net_size = subnet_ping_net_size
if subnet_ping_sample_rate is not None:
self.subnet_ping_sample_rate = subnet_ping_sample_rate
if host_ping is not None:
self.host_ping = host_ping
if probes is not None:
self.probes = probes
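    # Illustrative construction (added; values are placeholders). `targets` is
    # the only argument whose setter rejects None; `scan_frequency`, if given,
    # must be one of the values validated in its setter below.
    # opts = ScanOptions(targets="192.168.0.0/24", scan_name="lan sweep",
    #                    scan_frequency="once", rate="1000")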
@property
def targets(self):
"""Gets the targets of this ScanOptions. # noqa: E501
:return: The targets of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._targets
@targets.setter
def targets(self, targets):
"""Sets the targets of this ScanOptions.
:param targets: The targets of this ScanOptions. # noqa: E501
:type: str
"""
if targets is None:
raise ValueError("Invalid value for `targets`, must not be `None`") # noqa: E501
self._targets = targets
@property
def excludes(self):
"""Gets the excludes of this ScanOptions. # noqa: E501
:return: The excludes of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._excludes
@excludes.setter
def excludes(self, excludes):
"""Sets the excludes of this ScanOptions.
:param excludes: The excludes of this ScanOptions. # noqa: E501
:type: str
"""
self._excludes = excludes
@property
def scan_name(self):
"""Gets the scan_name of this ScanOptions. # noqa: E501
:return: The scan_name of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._scan_name
@scan_name.setter
def scan_name(self, scan_name):
"""Sets the scan_name of this ScanOptions.
:param scan_name: The scan_name of this ScanOptions. # noqa: E501
:type: str
"""
self._scan_name = scan_name
@property
def scan_description(self):
"""Gets the scan_description of this ScanOptions. # noqa: E501
:return: The scan_description of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._scan_description
@scan_description.setter
def scan_description(self, scan_description):
"""Sets the scan_description of this ScanOptions.
:param scan_description: The scan_description of this ScanOptions. # noqa: E501
:type: str
"""
self._scan_description = scan_description
@property
def scan_frequency(self):
"""Gets the scan_frequency of this ScanOptions. # noqa: E501
:return: The scan_frequency of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._scan_frequency
@scan_frequency.setter
def scan_frequency(self, scan_frequency):
"""Sets the scan_frequency of this ScanOptions.
:param scan_frequency: The scan_frequency of this ScanOptions. # noqa: E501
:type: str
"""
allowed_values = ["once", "hourly", "daily", "weekly", "monthly", "continuous"] # noqa: E501
if scan_frequency not in allowed_values:
raise ValueError(
"Invalid value for `scan_frequency` ({0}), must be one of {1}" # noqa: E501
.format(scan_frequency, allowed_values)
)
self._scan_frequency = scan_frequency
@property
def scan_start(self):
"""Gets the scan_start of this ScanOptions. # noqa: E501
:return: The scan_start of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._scan_start
@scan_start.setter
def scan_start(self, scan_start):
"""Sets the scan_start of this ScanOptions.
:param scan_start: The scan_start of this ScanOptions. # noqa: E501
:type: str
"""
self._scan_start = scan_start
@property
def scan_tags(self):
"""Gets the scan_tags of this ScanOptions. # noqa: E501
:return: The scan_tags of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._scan_tags
@scan_tags.setter
def scan_tags(self, scan_tags):
"""Sets the scan_tags of this ScanOptions.
:param scan_tags: The scan_tags of this ScanOptions. # noqa: E501
:type: str
"""
self._scan_tags = scan_tags
@property
def scan_grace_period(self):
"""Gets the scan_grace_period of this ScanOptions. # noqa: E501
:return: The scan_grace_period of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._scan_grace_period
@scan_grace_period.setter
def scan_grace_period(self, scan_grace_period):
"""Sets the scan_grace_period of this ScanOptions.
:param scan_grace_period: The scan_grace_period of this ScanOptions. # noqa: E501
:type: str
"""
self._scan_grace_period = scan_grace_period
@property
def agent(self):
"""Gets the agent of this ScanOptions. # noqa: E501
:return: The agent of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._agent
@agent.setter
def agent(self, agent):
"""Sets the agent of this ScanOptions.
:param agent: The agent of this ScanOptions. # noqa: E501
:type: str
"""
self._agent = agent
@property
def rate(self):
"""Gets the rate of this ScanOptions. # noqa: E501
:return: The rate of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._rate
@rate.setter
def rate(self, rate):
"""Sets the rate of this ScanOptions.
:param rate: The rate of this ScanOptions. # noqa: E501
:type: str
"""
self._rate = rate
@property
def max_host_rate(self):
"""Gets the max_host_rate of this ScanOptions. # noqa: E501
:return: The max_host_rate of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._max_host_rate
@max_host_rate.setter
def max_host_rate(self, max_host_rate):
"""Sets the max_host_rate of this ScanOptions.
:param max_host_rate: The max_host_rate of this ScanOptions. # noqa: E501
:type: str
"""
self._max_host_rate = max_host_rate
@property
def passes(self):
"""Gets the passes of this ScanOptions. # noqa: E501
:return: The passes of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._passes
@passes.setter
def passes(self, passes):
"""Sets the passes of this ScanOptions.
:param passes: The passes of this ScanOptions. # noqa: E501
:type: str
"""
self._passes = passes
@property
def max_attempts(self):
"""Gets the max_attempts of this ScanOptions. # noqa: E501
:return: The max_attempts of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._max_attempts
@max_attempts.setter
def max_attempts(self, max_attempts):
"""Sets the max_attempts of this ScanOptions.
:param max_attempts: The max_attempts of this ScanOptions. # noqa: E501
:type: str
"""
self._max_attempts = max_attempts
@property
def max_sockets(self):
"""Gets the max_sockets of this ScanOptions. # noqa: E501
:return: The max_sockets of this ScanOptions. # noqa: E501
:rtype: str
"""
return self._max_sockets
@max_sockets.setter
def max_sockets(self, max_sockets):
"""Sets the max_sockets of this ScanOptions.
:param max_sockets: The max_sockets of this ScanOptions. # noqa: E501
:type: str
"""
self._max_sockets = max_sockets
@property
def max_group_size(self):
import unittest
import auto_diff
import numpy as np
class AutoDiffUnitTesting(unittest.TestCase):
def _assertAllClose(self, actual, desired, rtol=1e-07, atol=1e-12, equal_nan=True):
np.testing.assert_allclose(actual, desired, rtol, atol, equal_nan)
class TestSingleVariableAutoDiff(AutoDiffUnitTesting):
def _test_helper(self, f, x, df_dx, debug=False):
if debug:
breakpoint()
input_x = x
f_x = f(input_x)
with auto_diff.AutoDiff(input_x) as x:
y, Jf = auto_diff.get_value_and_jacobian(f(x))
self._assertAllClose(y, f_x)
self._assertAllClose(Jf, df_dx)
# Some bugs only appeared with rectangular Jacobians.
A = np.random.rand(input_x.shape[0], 3 * input_x.shape[0])
b = np.random.rand(input_x.shape[0], 1)
x = np.linalg.lstsq(A, input_x - b, rcond=None)[0]
df_dx = df_dx @ A
with auto_diff.AutoDiff(x) as x:
y, Jf = auto_diff.get_value_and_jacobian(f(A @ x + b))
self._assertAllClose(y, f_x)
self._assertAllClose(Jf, df_dx)
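# The reshaped check above relies on the chain rule: for g(x) = f(A @ x + b),
# J_g(x) = J_f(A @ x + b) @ A, which is why the expected Jacobian df_dx is
# right-multiplied by A before the comparison.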
def _test_out(self, f, x, df_dx, debug=False):
if debug:
breakpoint()
input_x = x
f_x = f(input_x)
with auto_diff.AutoDiff(input_x) as x:
out_dest = np.ndarray(f_x.shape)
f(x, out=out_dest)
y, Jf = auto_diff.get_value_and_jacobian(out_dest)
self._assertAllClose(f_x, y)
self._assertAllClose(Jf, df_dx)
def test_add_with_out(self):
def f(x):
y = np.sqrt(x)
out = np.ndarray((3, 1))
np.add(x, y, out=out)
return out
x = np.array([[2.], [4.], [9.0]])
df_dx = np.array([[1 + 0.5 / np.sqrt(2.), 0.0, 0.0],
[0.0, 1 + 1./4., 0.0],
[0.0, 0.0, 1 + 1./6.]])
self._test_helper(f, x, df_dx)
def test_multiply_with_out(self):
def f(x):
y = np.sqrt(x)
out = np.ndarray((3, 1))
np.multiply(x, y, out=out)
return out
x = np.array([[2.], [4.], [9.0]])
df_dx = np.array([[np.sqrt(2) + 1 / np.sqrt(2.), 0.0, 0.0],
[0.0, 2 + 4 * 1./4., 0.0],
[0.0, 0.0, 3 + 9 * 1./6.]])
self._test_helper(f, x, df_dx)
def test_abs(self):
f = np.abs
x = np.array([[2.], [-2.], [0.0]])
df_dx = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 0.0]])
# x = np.array([[2.], [-2.], [4.0]])
# df_dx = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]])
with self.assertWarns(UserWarning, msg='abs of a near-zero number, derivative is ill-defined'):
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_sqrt(self):
f = np.sqrt
x = np.array([[2.], [4.], [9.0]])
df_dx = np.array([[0.5 / np.sqrt(2.), 0.0, 0.0],
[0.0, 1./4., 0.0],
[0.0, 0.0, 1./6.]])
# x = np.array([[2.], [-2.], [4.0]])
# df_dx = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_sin(self):
f = np.sin
x = np.array([[np.pi], [-np.pi/2], [np.pi/4]])
df_dx = np.array([[-1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0, 0, np.sqrt(2) / 2]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_cos(self):
f = np.cos
x = np.array([[np.pi], [-np.pi/2], [np.pi/4]])
df_dx = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0, 0, -np.sqrt(2) / 2]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_tan(self):
f = np.tan
x = np.array([[np.pi], [-np.pi/3], [np.pi/4]])
df_dx = np.array([[1.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0, 0, 2.0]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_tanh(self):
f = np.tanh
x = np.array([[np.log(2)], [-np.log(3)], [0.0]])
df_dx = np.array([[0.64, 0.0, 0.0], [0.0, 0.36, 0.0], [0, 0, 1.0]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_sinh(self):
f = np.sinh
x = np.array([[np.log(2)], [-np.log(3)], [0.0]])
df_dx = np.array([[1.25, 0.0, 0.0], [0.0, 5 / 3, 0.0], [0, 0, 1.0]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_cosh(self):
f = np.cosh
x = np.array([[np.log(2)], [-np.log(3)], [0.0]])
df_dx = np.array([[2.25/3, 0.0, 0.0], [0.0, -4/3, 0.0], [0, 0, 0.0]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_arctanh(self):
f = np.arctanh
x = np.array([[np.sqrt(1/4)], [0.5], [0.0]])
df_dx = np.array([[4/3, 0.0, 0.0], [0.0, 1/(1 - 0.5**2), 0.0], [0, 0, 1.0]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_arccosh(self):
f = np.arccosh
x = np.array([[np.sqrt(5)], [np.sqrt(10)], [np.sqrt(17)]])
df_dx = np.array([[1/2, 0.0, 0.0], [0.0, 1/3, 0.0], [0, 0, 1.0/4]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_arcsinh(self):
f = np.arcsinh
x = np.array([[np.sqrt(3)], [np.sqrt(8)], [np.sqrt(15)]])
df_dx = np.array([[1/2, 0.0, 0.0], [0.0, 1/3, 0.0], [0, 0, 1.0/4]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_arcsin(self):
f = np.arcsin
x = np.array([[0], [np.sqrt(2)/2], [1/2]])
df_dx = np.array([[1.0, 0.0, 0.0],
[0.0, np.sqrt(2), 0.0],
[0, 0, 2 / np.sqrt(3)]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_arccos(self):
f = np.arccos
x = np.array([[0], [np.sqrt(2)/2], [1/2]])
df_dx = np.array([[-1.0, 0.0, 0.0],
[0.0, -np.sqrt(2), 0.0],
[0, 0, -2 / np.sqrt(3)]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_arctan(self):
f = np.arctan
x = np.array([[-1.0], [99999], [1.0]])
df_dx = np.array([[0.5, 0.0, 0.0],
[0.0, 1.0002e-10, 0.0],
[0, 0, 1/2]])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_log(self):
f = np.log
x = np.array([[1.0], [0.5], [2.5]])
df_dx = np.diag([1.0, 2, .4])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_log2(self):
f = np.log2
x = np.array([[1.0], [0.5], [2.5]])
df_dx = np.diag([1.0, 2, .4]) / np.log(2)
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_log10(self):
f = np.log10
x = np.array([[1.0], [0.5], [2.5]])
df_dx = np.diag([1.0, 2, .4]) / np.log(10)
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_log1p(self):
f = np.log1p
x = np.array([[1.0], [-0.5], [1.5]])
df_dx = np.diag([.5, 2, .4])
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_negative(self):
f = np.negative
x = np.array([[1.0], [-0.5], [1.5]])
df_dx = -np.eye(3)
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_positive(self):
f = np.positive
x = np.array([[1.0], [-0.5], [1.5]])
df_dx = np.eye(3)
self._test_helper(f, x, df_dx)
self._test_out(f, x, df_dx)
def test_decomposing_x(self):
def f(x):
x_1, x_2, x_3 = x
return np.array([x_1 + x_2 + x_3])
x = np.array([[-1.0], [2.0], [3.0]])
df_dx = np.array([[1, 1, 1]])
self._test_helper(f, x, df_dx)
def f(x):
x_1, x_2, x_3 = x
return np.array([x_1 - x_2 - 2 * x_3])
x = np.array([[-1.0], [2.0], [3.0]])
df_dx = np.array([[1, -1, -2]])
self._test_helper(f, x, df_dx)
def f(x):
x_1, x_2, x_3 = x
return np.array([x_1 * x_2 - 2. * x_3 - x_1 * 3.,
x_2 / x_3 - x_2 / 2. + 3. / x_3])
x = np.array([[-1.0], [6.0], [3.0]])
df_dx = np.array([[3.0, -1, -2], [0, .3333333333 - 0.5, -6 / 9.0 - 1 / 3.0]])
self._test_helper(f, x, df_dx)
def f(x):
x_1, x_2 = x
return np.array([x_1**2., np.e**x_2, x_1**x_2])
x = np.array([[3.0], [3.0]])
df_dx = np.array([[6.0, 0.0], [0.0, np.exp(3)], [27.0, 27.0 * np.log(3)]])
self._test_helper(f, x, df_dx)
def test_constant(self):
def f(x):
return np.array([[0], [1], [2.0]])
x = np.array([[2.0]])
df_dx = np.array([[0], [0], [0.0]])
self._test_helper(f, x, df_dx)
def test_matrixmul(self):
A = np.array([[1.0, 4.0, 7.0], [5.0, 7.0, -200]])
x = np.array([[2.0], [3.0], [-4.0]])
self._test_helper(lambda x: A @ x, x, A)
def test_affine(self):
A = np.array([[1.0, 4.0, 7.0], [5.0, 7.0, -200]])
b = np.array([[3.0], [-np.pi]])
x = np.array([[2.0], [3.0], [-4.0]])
self._test_helper(lambda x: A @ x + b, x, A)
def test_exp_of_affine(self):
A = np.array([[1.0, -2.0, 7.0], [5.0, 7.0, 1]])
b = np.array([[48.0], [-8.0]])
x = np.array([[2.0], [1.0], [-7.0]])
k = A @ x + b
[y_1], [y_2] = np.exp(k)
df_dx = np.diag([y_1, y_2]) @ A
self._test_helper(lambda x: np.exp(A @ x + b), x, df_dx)
def test_assign_scalar(self):
def f(x):
C = 1.0e-7
retval = C * x
for i in range(3):
retval[i] = 0
return retval
x = np.array([[4.0], [3.0], [6.0], [7.0]])
df_dx = np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0, 0], [0, 0, 0, 0], [0,0, 0, 1.0e-7]])
self._test_helper(f, x, df_dx)
def test_assign_vector(self):
def f(x, u):
C = 1.0e-7
retval = C * x
for i in range(3):
retval[i] = u
return retval
x = np.array([[4.0], [3.0], [6.0], [7.0]])
df_dx = np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0, 0], [0, 0, 0, 0], [0,0, 0, 1.0e-7]])
u = np.array([[1.0]])
self._test_helper(lambda x: f(x, u), x, df_dx)
def test_mutating_in_place(self):
def f(x):
out = np.zeros((3, 1))
out[1, 0] -= x[0, 0]
out[2, 0] += x[1]
return out
x = np.array([[5.], [2.]])
df_dx = np.array([[0., 0.],
[-1., 0.],
[0., 1.]])
self._test_helper(f, x, df_dx)
def test_mutating_in_place_same_row(self):
def f(x):
out = np.zeros((1, 1))
out[0, 0] += x[0, 0]
out[0, 0] += x[1, 0]
return out
x = np.array([[5.], [2.]])
df_dx = np.array([[1., 1.]])
self._test_helper(f, x, df_dx)
class TestMultipleVariableAutoDiff(AutoDiffUnitTesting):
def _test_helper(self, f, x, u, df_dx, df_du, debug=False):
if debug:
breakpoint()
f_xu = f(x, u)
input_x = x
input_u = u
with auto_diff.AutoDiff(x, u) as (x, u):
y, (J_fx, J_fu) = auto_diff.get_value_and_jacobians(f(x, u))
self._assertAllClose(y, f_xu)
self._assertAllClose(J_fx, df_dx)
self._assertAllClose(J_fu, df_du)
u = input_u
with auto_diff.AutoDiff(input_x) as x:
y, J_fx = auto_diff.get_value_and_jacobian(f(x, u))
self._assertAllClose(y, f_xu)
self._assertAllClose(J_fx, df_dx)
x = input_x
with auto_diff.AutoDiff(input_u) as u:
y, J_fu = auto_diff.get_value_and_jacobian(f(x, u))
for j in Lrowstruc[i]:
if i != j:
L[i, j] = self[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] -= summ
L[i, j] /= D[j, j]
elif i == j:
D[i, i] = self[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] -= summ
return L, D
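# The loop above implements the standard LDL^T recurrences restricted to the
# symbolic sparsity pattern Lrowstruc:
#   D[i, i] = A[i, i] - sum_{k < i} L[i, k]**2 * D[k, k]
#   L[i, j] = (A[i, j] - sum_{p < j} L[i, p] * L[j, p] * D[p, p]) / D[j, j]  for i > j
# so only entries that are structurally non-zero are ever visited.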
def _lower_triangular_solve(self, rhs):
"""Fast algorithm for solving a lower-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows):
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Fast algorithm for solving an upper-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows - 1, -1, -1):
rows[i].reverse()
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
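# A small sketch of how the two triangular solvers combine to solve A*x = b once a
# Cholesky factor is available (same matrix as in the cholesky() docstring below;
# both solvers expect a column-vector right-hand side):
#
#   >>> from sympy.matrices import SparseMatrix
#   >>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
#   >>> b = SparseMatrix(3, 1, [1, 2, 3])
#   >>> L = A.cholesky()
#   >>> y = L._lower_triangular_solve(b)
#   >>> x = L.T._upper_triangular_solve(y)
#   >>> A * x == b
#   True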
def add(self, other):
"""Add two sparse matrices with dictionary representation.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, ones
>>> SparseMatrix(eye(3)).add(SparseMatrix(ones(3)))
Matrix([
[2, 1, 1],
[1, 2, 1],
[1, 1, 2]])
>>> SparseMatrix(eye(3)).add(-SparseMatrix(eye(3)))
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
Only the non-zero elements are stored, so the resulting dictionary
that is used to represent the sparse matrix is empty:
>>> _._smat
{}
See Also
========
multiply
"""
if not isinstance(other, SparseMatrix):
raise ValueError('only use add with %s, not %s' %
tuple([c.__class__.__name__ for c in (self, other)]))
if self.shape != other.shape:
raise ShapeError()
M = self.copy()
for i, v in other._smat.items():
v = M[i] + v
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def cholesky(self):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('Cholesky decomposition applies only to '
'symmetric matrices.')
M = self.as_mutable()._cholesky_sparse()
if M.has(nan) or M.has(oo):
raise ValueError('Cholesky decomposition applies only to '
'positive-definite matrices')
return self._new(M)
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
col_op
row_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))]
def col(self, j):
"""Returns column j from self as a column vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.col(0)
Matrix([
[1],
[3]])
See Also
========
row
col_list
"""
return self[:, j]
def copy(self):
return self._new(self.rows, self.cols, self._smat)
def extract(self, rowsList, colsList):
urow = list(uniq(rowsList))
ucol = list(uniq(colsList))
smat = {}
if len(urow)*len(ucol) < len(self._smat):
# there are fewer elements requested than there are elements in the matrix
for i, r in enumerate(urow):
for j, c in enumerate(ucol):
smat[i, j] = self._smat.get((r, c), 0)
else:
# most of the request will be zeros so check all of self's entries,
# keeping only the ones that are desired
for rk, ck in self._smat:
if rk in urow and ck in ucol:
smat[(urow.index(rk), ucol.index(ck))] = self._smat[(rk, ck)]
rv = self._new(len(urow), len(ucol), smat)
# rv is nominally correct but there might be rows/cols
# which require duplication
if len(rowsList) != len(urow):
for i, r in enumerate(rowsList):
i_previous = rowsList.index(r)
if i_previous != i:
rv = rv.row_insert(i, rv.row(i_previous))
if len(colsList) != len(ucol):
for i, c in enumerate(colsList):
i_previous = colsList.index(c)
if i_previous != i:
rv = rv.col_insert(i, rv.col(i_previous))
return rv
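# For example (mirroring the doctest conventions used elsewhere in this class):
#
#   >>> from sympy.matrices import SparseMatrix, eye
#   >>> SparseMatrix(eye(3)).extract([0, 2], [0, 2])
#   Matrix([
#   [1, 0],
#   [0, 1]])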
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
return cls(n, n, {(i, i): S.One for i in range(n)})
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(self[key].has(*patterns) for key in self._smat)
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = SparseMatrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
def cond():
d = self._smat
yield self.is_square
if len(d) <= self.rows:
yield fuzzy_and(
d[i, i].is_real for i, j in d if i == j)
else:
yield fuzzy_and(
d[i, i].is_real for i in range(self.rows) if (i, i) in d)
yield fuzzy_and(
((self[i, j] - self[j, i].conjugate()).is_zero
if (j, i) in d else False) for (i, j) in d)
return fuzzy_and(i for i in cond())
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self._smat) == self.rows
def is_symmetric(self, simplify=True):
"""Return True if self is symmetric.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> M = SparseMatrix(eye(3))
>>> M.is_symmetric()
True
>>> M[0, 2] = 1
>>> M.is_symmetric()
False
"""
if simplify:
return all((k[1], k[0]) in self._smat and
not (self[k] - self[(k[1], k[0])]).simplify()
for k in self._smat)
else:
return all((k[1], k[0]) in self._smat and
self[k] == self[(k[1], k[0])] for k in self._smat)
def LDLdecomposition(self):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('LDL decomposition applies only to '
'symmetric matrices.')
L, D = self.as_mutable()._LDL_sparse()
if L.has(nan) or L.has(oo) or D.has(nan) or D.has(oo):
raise ValueError('LDL decomposition applies only to '
'positive-definite matrices')
# get rid of milliseconds
time, _ = time.split(".", 1)
except ValueError:
pass
kw["date"], kw["time"] = date, time
except ValueError: # need more than one item to unpack
kw["date"] = kw["time"] = ""
date_options = dict(
kw.get("date_options")
or self.date_options
or self.default_date_options
)
date_options["formatSubmit"] = "yyyy-mm-dd"
kw["date_options_json"] = json.dumps(date_options)
time_options = dict(
kw.get("time_options")
or self.time_options
or self.default_time_options
)
time_options["formatSubmit"] = "HH:i"
kw["time_options_json"] = json.dumps(time_options)
values = self.get_template_values(field, cstruct, kw)
template = readonly and self.readonly_template or self.template
return field.renderer(template, **values)
def deserialize(self, field, pstruct):
if pstruct is null:
return null
else:
try:
validated = self._pstruct_schema.deserialize(pstruct)
except Invalid as exc:
raise Invalid(field.schema, "Invalid pstruct: %s" % exc)
# seriously pickadate? oh. right. i forgot. you're javascript.
date = validated["date_submit"] or validated["date"]
time = validated["time_submit"] or validated["time"]
if not time and not date:
return null
result = "T".join([date, time])
if not date:
raise Invalid(field.schema, _("Incomplete date"), result)
if not time:
raise Invalid(field.schema, _("Incomplete time"), result)
return result
class TextAreaWidget(TextInputWidget):
"""
Renders a ``<textarea>`` widget.
**Attributes/Arguments**
cols
The size, in columns, of the text input field. Defaults to
``None``, meaning that the ``cols`` is not included in the
widget output (uses browser default cols).
rows
The size, in rows, of the text input field. Defaults to
``None``, meaning that the ``rows`` is not included in the
widget output (uses browser default cols).
template
The template name used to render the widget. Default:
``textarea``.
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/textinput``.
strip
If true, during deserialization, strip the value of leading
and trailing whitespace (default ``True``).
"""
template = "textarea"
readonly_template = "readonly/textinput"
cols = None
rows = None
strip = True
class RichTextWidget(TextInputWidget):
"""
Renders a ``<textarea>`` widget with the
:term:`TinyMCE Editor`.
To use this widget the :term:`TinyMCE Editor` library must be
provided in the page where the widget is rendered. A version of
:term:`TinyMCE Editor` is included in Deform's ``static`` directory.
**Attributes/Arguments**
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/richtext``.
delayed_load
If you have many richtext fields, you can set this option to
``True``, and the richtext editor will only be loaded upon
the user clicking the field. Default: ``False``.
**Security Note**: Enabling ``delayed_load`` can create an
HTML injection vulnerability. When enabled, any existing value
for the field will be rendered without HTML escaping. Also,
on form re-display, any user-submitted value which passes
validation will be rendered unescaped. (If the field has a
validation error, ``delayed_load`` will be disabled during
re-display.) You should not enable ``delayed_load`` unless you
trust both existing and valid user-submitted values for the field
to be 'safe HTML'.
strip
If true, during deserialization, strip the value of leading
and trailing whitespace. Default: ``True``.
template
The template name used to render the widget. Default:
``richtext``.
options
A dictionary or sequence of two-tuples containing additional
options to pass to the TinyMCE ``init`` function call. All types
within such structure should be Python native as the structure
will be converted to JSON on serialization. This widget provides
some sensible defaults, as described below in
:attr:`default_options`.
You should refer to the `TinyMCE Configuration
<https://www.tiny.cloud/docs/configure/>`_ documentation
for details regarding all available configuration options.
The ``language`` option is passed to TinyMCE within the default
template, using i18n machinery to determine the language to use.
This option can be overridden if it is specified here in ``options``.
*Note*: the ``elements`` option for TinyMCE is set automatically
according to the given field's ``oid``.
Default: ``None`` (no additional options)
Note that the RichTextWidget template does not honor the ``css_class``
or ``style`` attributes of the widget.
"""
readonly_template = "readonly/richtext"
delayed_load = False
strip = True
template = "richtext"
requirements = (("tinymce", None),)
#: Default options passed to TinyMCE. Customise by using :attr:`options`.
default_options = (
("height", 240),
("width", 0),
("skin", "lightgray"),
("theme", "modern"),
("mode", "exact"),
("strict_loading_mode", True),
("theme_advanced_resizing", True),
("theme_advanced_toolbar_align", "left"),
("theme_advanced_toolbar_location", "top"),
)
#: Options to pass to TinyMCE that will override :attr:`default_options`.
options = None
def serialize(self, field, cstruct, **kw):
if cstruct in (null, None):
cstruct = ""
readonly = kw.get("readonly", self.readonly)
options = dict(self.default_options)
# Accept overrides from keywords or as an attribute
options_overrides = dict(kw.get("options", self.options or {}))
options.update(options_overrides)
# Dump to JSON and strip curly braces at start and end
kw["tinymce_options"] = json.dumps(options)[1:-1]
values = self.get_template_values(field, cstruct, kw)
template = readonly and self.readonly_template or self.template
return field.renderer(template, **values)
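# A minimal usage sketch (the schema and field names are illustrative, not part of
# this module): options supplied here are merged over `default_options` in
# serialize() above and passed to the TinyMCE ``init`` call as JSON.
#
#   import colander
#   import deform
#
#   class Page(colander.Schema):
#       body = colander.SchemaNode(
#           colander.String(),
#           widget=deform.widget.RichTextWidget(
#               options={"menubar": False, "height": 300},
#           ),
#       )
#
#   form = deform.Form(Page(), buttons=("save",))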
class PasswordWidget(TextInputWidget):
"""
Renders a single <input type="password"/> input field.
**Attributes/Arguments**
template
The template name used to render the widget. Default:
``password``.
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/password``.
strip
If true, during deserialization, strip the value of leading
and trailing whitespace. Default: ``True``.
redisplay
If true, on validation failure, retain and redisplay the password
input. If false, on validation failure, this field will be
rendered empty. Default: ``False``.
"""
template = "password"
readonly_template = "readonly/password"
redisplay = False
class HiddenWidget(Widget):
"""
Renders an ``<input type="hidden"/>`` widget.
**Attributes/Arguments**
template
The template name used to render the widget. Default:
``hidden``.
"""
template = "hidden"
hidden = True
def serialize(self, field, cstruct, **kw):
if cstruct in (null, None):
cstruct = ""
values = self.get_template_values(field, cstruct, kw)
return field.renderer(self.template, **values)
def deserialize(self, field, pstruct):
if not pstruct:
return null
elif not isinstance(pstruct, string_types):
raise Invalid(field.schema, "Pstruct is not a string")
return pstruct
class CheckboxWidget(Widget):
"""
Renders an ``<input type="checkbox"/>`` widget.
**Attributes/Arguments**
true_val
The value which should be returned during deserialization if
the box is checked. Default: ``true``.
false_val
The value which should be returned during deserialization if
the box was left unchecked. Default: ``false``.
template
The template name used to render the widget. Default:
``checkbox``.
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/checkbox``.
"""
true_val = "true"
false_val = "false"
template = "checkbox"
readonly_template = "readonly/checkbox"
def serialize(self, field, cstruct, **kw):
readonly = kw.get("readonly", self.readonly)
template = readonly and self.readonly_template or self.template
values = self.get_template_values(field, cstruct, kw)
return field.renderer(template, **values)
def deserialize(self, field, pstruct):
if pstruct is null:
return self.false_val
elif not isinstance(pstruct, string_types):
raise Invalid(field.schema, "Pstruct is not a string")
return (pstruct == self.true_val) and self.true_val or self.false_val
class OptGroup(object):
"""
Used in the ``values`` argument passed to an instance of
``SelectWidget`` to render an ``<optgroup>`` HTML tag.
**Attributes/Arguments**
label
The label of the ``<optgroup>`` HTML tag.
options
A sequence that describes the ``<options>`` HTML tag(s). It
must have the same structure as the ``values``
argument/parameter in the ``SelectWidget`` class, but should
not contain ``OptGroup`` instances since ``<optgroup>`` HTML
tags cannot be nested.
"""
def __init__(self, label, *options):
self.label = label
self.options = options
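# A small sketch of a `values` sequence for the SelectWidget below, mixing plain
# two-tuples with an OptGroup (labels are illustrative):
#
#   values = (
#       ("", "- Select -"),
#       OptGroup("Fruits", ("apple", "Apple"), ("pear", "Pear")),
#       ("other", "Other"),
#   )
#   widget = SelectWidget(values=values)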
class SelectWidget(Widget):
"""
Renders ``<select>`` field based on a predefined set of values.
**Attributes/Arguments**
values
A sequence of items where each item must be either:
- a two-tuple (the first value must be of type string, unicode
or integer, the second value must be string or unicode)
indicating allowable, displayed values, e.g. ``('jsmith',
'<NAME>')``. The first element in the tuple is the value
that should be returned when the form is posted. The second
is the display value;
- or an instance of ``optgroup_class`` (which is
``deform.widget.OptGroup`` by default).
null_value
The value which represents the null value. When the null
value is encountered during serialization, the
:attr:`colander.null` sentinel is returned to the caller.
Default: ``''`` (the empty string).
template
The template name used to render the widget. Default:
``select``.
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/select``.
multiple
Enable multiple on the select widget ( default: ``False`` )
optgroup_class
The class used to represent ``<optgroup>`` HTML tags. Default:
``deform.widget.OptGroup``.
long_label_generator
A function that returns the "long label" used as the
description for very old browsers that do not support the
``<optgroup>`` HTML tag. If a function is provided, the
``label`` attribute will receive the (short) description,
while the content of the ``<option>`` tag
rse = get_rse(rse_id=get_rse_id(rse=rse_name, **self.vo))
assert rse.rse == rse_name
assert rse.deterministic == properties['deterministic']
assert rse.volatile == properties['volatile']
assert rse.city == properties['city']
assert rse.region_code == properties['region_code']
assert rse.country_name == properties['country_name']
assert rse.continent == properties['continent']
assert rse.time_zone == properties['time_zone']
assert rse.ISP == properties['ISP']
assert rse.staging_area == properties['staging_area']
assert rse.rse_type == RSEType.TAPE
assert rse.longitude == properties['longitude']
assert rse.latitude == properties['latitude']
assert rse.ASN == properties['ASN']
assert rse.availability == properties['availability']
with pytest.raises(Duplicate):
self.client.add_rse(rse_name)
bad_rse = 'MOCK_$*&##@!'
with pytest.raises(InvalidObject):
ret = self.client.add_rse(bad_rse)
def test_update_rse(self):
""" RSE (CLIENTS): update rse."""
# Check if updating RSE does not remove RSE tag
rse = rse_name_generator()
ret = self.client.add_rse(rse)
assert get_rse_attribute(key=rse, rse_id=get_rse_id(rse, **self.vo)) == [True]
self.client.update_rse(rse, {'availability_write': False, 'availability_delete': False})
assert get_rse_attribute(key=rse, rse_id=get_rse_id(rse, **self.vo)) == [True]
rse = rse_name_generator()
renamed_rse = 'renamed_rse%s' % rse
ret = self.client.add_rse(rse)
assert ret
ret = self.client.update_rse(rse, {'name': renamed_rse})
assert ret
dict2 = self.client.get_rse(renamed_rse)
assert renamed_rse == dict2['rse']
tmp_scope = 'mock'
nbfiles = 5
files1 = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1,
'adler32': '0cc737eb', 'meta': {'events': 10}} for i in range(nbfiles)]
replica_client = ReplicaClient()
replica_client.add_replicas(rse=renamed_rse, files=files1)
ret = self.client.update_rse(renamed_rse, {'availability_write': False, 'availability_delete': False})
assert ret
dict2 = self.client.get_rse(renamed_rse)
assert dict2['availability_write'] is False
assert dict2['availability_delete'] is False
files2 = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1,
'adler32': '0cc737eb', 'meta': {'events': 10}} for i in range(nbfiles)]
with pytest.raises(ResourceTemporaryUnavailable):
replica_client.add_replicas(rse=renamed_rse, files=files2, ignore_availability=False)
def test_list_rses(self):
""" RSE (CLIENTS): try to list rses."""
rse_list = [rse_name_generator() for i in range(5)]
for rse in rse_list:
self.client.add_rse(rse)
svr_list = [r['rse'] for r in self.client.list_rses()]
for rse in rse_list:
assert rse in svr_list
def test_get_rse(self):
""" RSE (CLIENTS): Get a RSE."""
id = 'MOCK'
props = self.client.get_rse(rse=id)
assert props['rse'] == id
# ADD PROTOCOLS
def test_add_protocol(self):
""" RSE (CLIENTS): add three protocols to rse."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 4,
'write': 1,
'delete': 0}
},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 18,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 20,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 2,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
resp = mgr.get_rse_info(rse=protocol_rse, **self.vo)
for p in resp['protocols']:
if ((p['port'] == 19) and (p['domains']['lan']['read'] != 1)) or \
((p['port'] == 20) and (p['domains']['lan']['read'] != 2)) or \
((p['port'] == 18) and (p['domains']['lan']['read'] != 1)) or \
((p['port'] == 17) and (p['domains']['lan']['read'] != 4)):
print(resp)
assert False
self.client.delete_protocols(protocol_rse, scheme='MOCK')
self.client.delete_rse(protocol_rse)
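# Note: the integers under 'domains' are per-operation priorities used to rank
# protocols (1 is the most preferred; 0 appears to mean the operation is not
# supported). The loop above verifies that the server preserves the submitted
# lan 'read' priority for each port.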
def test_add_protocol_rse_not_found(self):
""" RSE (CLIENTS): add a protocol to an rse that does not exist (RSENotFound)."""
attributes = {'hostname': 'localhost',
'scheme': 'MOCK_Fail',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
with pytest.raises(RSENotFound):
self.client.add_protocol('The One that shouldn\'t be here', attributes)
def test_add_protocol_missing_values(self):
""" RSE (CLIENTS): add a protocol with insufficient parameters (InvalidObject)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
attributes = {'hostname': 'localhost',
'scheme': 'MOCK_Fail',
'port': 17,
'prefix': '/the/one/with/all/the/files',
# 'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
try:
with pytest.raises(exception.InvalidObject):
self.client.add_protocol(protocol_rse, attributes)
finally:
self.client.delete_rse(protocol_rse)
def test_add_protocol_duplicate(self):
""" RSE (CLIENTS): add duplicate protocol to rse (Duplicate)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
attributes = {'hostname': 'localhost',
'scheme': 'MOCK_Duplicate',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
try:
self.client.add_protocol(protocol_rse, attributes)
with pytest.raises(exception.Duplicate):
self.client.add_protocol(protocol_rse, attributes)
finally:
self.client.delete_rse(protocol_rse)
def test_add_protocol_not_supported_domain(self):
""" RSE (CLIENTS): add a protocol with unsupported domain parameters (RSEProtocolDomainNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
attributes = {'hostname': 'localhost',
'scheme': 'Mock_Insuff_Params',
'port': 17,
'prefix': '/the/one/with/all/the/files',
# 'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'FIRENDS': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
try:
with pytest.raises(exception.RSEProtocolDomainNotSupported):
self.client.add_protocol(protocol_rse, attributes)
finally:
self.client.delete_rse(protocol_rse)
def test_add_protocol_wrong_priority(self):
""" RSE (CLIENTS): Add a protocol with an invalid priority for ranking. """
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocol_ports = [17, 29, 42]
for i in range(3):
attributes = {'hostname': 'localhost',
'scheme': 'MOCK',
'port': protocol_ports[i],
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
self.client.add_protocol(protocol_rse, attributes)
try:
attributes = {'hostname': 'localhost',
'scheme': 'MOCK',
'port': 815,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 4,
'write': 99,
'delete': -1}},
'extended_attributes': 'TheOneWithAllTheRest'}
with pytest.raises(exception.RSEProtocolPriorityError):
self.client.add_protocol(protocol_rse, attributes)
finally:
self.client.delete_rse(protocol_rse)
# DELETE PROTOCOLS
def test_del_protocol_rse_not_found(self):
""" RSE (CLIENTS): delete a protocol from an rse that does not exist (RSENotFound)."""
with pytest.raises(RSENotFound):
self.client.delete_protocols('The One that shouldn\'t be here', 'MOCK_Fail')
def test_del_protocol_id(self):
""" RSE (CLIENTS): delete multiple protocols with the same identifier from an rse."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocol_id = 'MOCK_DEL_ID_SUCCESS'
protocol_ports = [17, 29, 42]
for i in range(3):
attributes = {'hostname': 'localhost',
'scheme': protocol_id,
'port': protocol_ports[i],
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}}}
self.client.add_protocol(protocol_rse, attributes)
try:
self.client.delete_protocols(protocol_rse, protocol_id)
# check if empty
resp = mgr.get_rse_info(rse=protocol_rse, **self.vo)
with pytest.raises(RSEProtocolNotSupported):
mgr.select_protocol(resp, 'read', scheme=protocol_id)
finally:
self.client.delete_rse(protocol_rse)
def test_del_protocol_id_protocol_not_supported(self):
""" RSE (CLIENTS): delete a none-existing protocol from an rse (RSEProtocolNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
try:
with pytest.raises(exception.RSEProtocolNotSupported):
self.client.delete_protocols(protocol_rse, 'MOCK_Fail')
finally:
self.client.delete_rse(protocol_rse)
def test_del_protocol_hostname(self):
""" RSE (CLIENTS): delete multiple protocols with the same identifier, and the same hostname from an rse."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocol_id = 'MOCK_DEL_HOST_SUCCESS'
protocol_hostname = ['localhost', 'an_other_host', 'localhost']
protocol_ports = [17, 29, 42]
for i in range(3):
attributes = {'hostname': protocol_hostname[i],
'scheme': protocol_id,
'port': protocol_ports[i],
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
self.client.add_protocol(protocol_rse, attributes)
self.client.delete_protocols(protocol_rse, scheme=protocol_id, hostname='localhost')
# check if protocol for 'other_host' are still there
resp = mgr.get_rse_info(rse=protocol_rse, **self.vo)
for r in resp['protocols']:
if r['hostname'] == 'localhost':
self.client.delete_rse(protocol_rse)
raise Exception('Protocols not deleted. Remaining: %s' % resp)
self.client.delete_protocols(protocol_rse, scheme=protocol_id, hostname='an_other_host')
self.client.delete_rse(protocol_rse)
def test_del_protocol_hostname_protocol_not_supported(self):
""" RSE (CLIENTS): delete a non-existing protocol from an rse with given hostname (RSEProtocolNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
try:
attributes = {'hostname': 'localhost',
'scheme': 'MOCK_PROTOCOL_DEL_HOST_FAIL',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
self.client.add_protocol(protocol_rse, attributes)
with pytest.raises(exception.RSEProtocolNotSupported):
self.client.delete_protocols(protocol_rse, attributes['scheme'], hostname='an_other_host')
self.client.delete_protocols(protocol_rse, attributes['scheme'], hostname=attributes['hostname'])
finally:
self.client.delete_rse(protocol_rse)
def test_del_protocol_port(self):
""" RSE (CLIENTS): delete a specific protocol from an rse."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocol_id = 'MOCK_DEL_PORT_SUCCESS'
protocol_hostname = ['localhost', 'an_other_host', 'localhost']
protocol_ports = [17, 29, 42]
for i in range(3):
attributes = {'hostname': protocol_hostname[i],
'scheme': protocol_id,
'port': protocol_ports[i],
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
self.client.add_protocol(protocol_rse, attributes)
self.client.delete_protocols(protocol_rse, scheme=protocol_id, hostname='localhost', port=17)
# check remaining protocols
resp = mgr.get_rse_info(rse=protocol_rse, **self.vo)
for r in resp['protocols']:
if r['port'] == 17:
self.client.delete_protocols(protocol_rse, protocol_id)
self.client.delete_rse(protocol_rse)
raise Exception('Protocols not deleted. Remaining: %s' % resp)
self.client.delete_protocols(protocol_rse, protocol_id)
self.client.delete_rse(protocol_rse)
def test_del_protocol_port_protocol_not_supported(self):
""" RSE (CLIENTS): delete a specific protocol from an rse. (RSEProtocolNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
attributes = {'hostname': 'localhost',
'scheme': 'MOCK_PROTOCOL_DEL_PORT_FAIL',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
self.client.add_protocol(protocol_rse, attributes)
try:
with pytest.raises(exception.RSEProtocolNotSupported):
self.client.delete_protocols(protocol_rse, 'MOCK_Fail', hostname='localhost', port=17)
finally:
self.client.delete_rse(protocol_rse)
# GET PROTOCOLS
def test_get_protocols(self):
""" RSE (CLIENTS): get protocols of rse."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK_READ',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1},
'wan': {'read': 0,
'write': 0,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_WRITE',
'hostname': 'localhost',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1},
'wan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_DELETE',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 0,
'delete': 1},
'wan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
# GET all = 3
resp = mgr.get_rse_info(rse=protocol_rse, **self.vo)
if len(resp['protocols']) != 3:
for p in protocols:
self.client.delete_protocols(protocol_rse, p['scheme'])
self.client.delete_rse(protocol_rse)
raise Exception('Unexpected protocols returned: %s' % resp)
for p in protocols:
self.client.delete_protocols(protocol_rse, p['scheme'])
self.client.delete_rse(protocol_rse)
def test_get_protocols_rse_not_found(self):
""" RSE (CLIENTS): get all protocols of rse (RSENotFound)."""
| |
int|float
"""
return self["borderpad"]
@borderpad.setter
def borderpad(self, val):
self["borderpad"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) of the border enclosing the annotation
`text`.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# captureevents
# -------------
@property
def captureevents(self):
"""
Determines whether the annotation text box captures mouse move
and click events, or allows those events to pass through to
data points in the plot that may be behind the annotation. By
default `captureevents` is False unless `hovertext` is
provided. If you use the event `plotly_clickannotation` without
`hovertext` you must explicitly enable `captureevents`.
The 'captureevents' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["captureevents"]
@captureevents.setter
def captureevents(self, val):
self["captureevents"] = val
# font
# ----
@property
def font(self):
"""
Sets the annotation text font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.annotation.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.annotation.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# height
# ------
@property
def height(self):
"""
Sets an explicit height for the text box. null (default) lets
the text set the box height. Taller text will be clipped.
The 'height' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["height"]
@height.setter
def height(self, val):
self["height"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.annotation.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover label.
By default uses the annotation's `bgcolor` made
opaque, or white if it was transparent.
bordercolor
Sets the border color of the hover label. By
default uses either dark grey or white, for
maximum contrast with `hoverlabel.bgcolor`.
font
Sets the hover label text font. By default uses
the global hover font and size, with color from
`hoverlabel.bordercolor`.
Returns
-------
plotly.graph_objs.layout.scene.annotation.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets text to appear when hovering over this annotation. If
omitted or blank, no hover label will appear.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the annotation (text + arrow).
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# showarrow
# ---------
@property
def showarrow(self):
"""
Determines whether or not the annotation is drawn with an
arrow. If True, `text` is placed near the arrow's tail. If
False, `text` lines up with the `x` and `y` provided.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
# standoff
# --------
@property
def standoff(self):
"""
Sets a distance, in pixels, to move the end arrowhead away from
the position it is pointing at, for example to point at the
edge of a marker independent of zoom. Note that this shortens
the arrow from the `ax` / `ay` vector, in contrast to `xshift`
/ `yshift` which moves everything by this amount.
The 'standoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["standoff"]
@standoff.setter
def standoff(self, val):
self["standoff"] = val
# startarrowhead
# --------------
@property
def startarrowhead(self):
"""
Sets the start annotation arrow head style.
The 'startarrowhead' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 8]
Returns
-------
int
"""
return self["startarrowhead"]
@startarrowhead.setter
def startarrowhead(self, val):
self["startarrowhead"] = val
# startarrowsize
# --------------
@property
def startarrowsize(self):
"""
Sets the size of the start annotation arrow head, relative to
`arrowwidth`. A value of 1 (default) gives a head about 3x as
wide as the line.
The 'startarrowsize' property is a number and may be specified as:
- An int or float in the interval [0.3, inf]
Returns
-------
int|float
"""
return self["startarrowsize"]
@startarrowsize.setter
def startarrowsize(self, val):
self["startarrowsize"] = val
# startstandoff
# -------------
@property
def startstandoff(self):
"""
Sets a distance, in pixels, to move the start arrowhead away
from the position it is pointing at, for example to point at
the edge of a marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector, in contrast to
`xshift` / `yshift` which moves everything by this amount.
The 'startstandoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["startstandoff"]
@startstandoff.setter
def startstandoff(self, val):
self["startstandoff"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# text
# ----
@property
def text(self):
"""
Sets the text associated with this annotation. Plotly uses a
subset of HTML tags
from __future__ import annotations
from enum import Enum, IntEnum
import math
from dataclasses import dataclass, field
from typing import Tuple, List, Union
from pydantic import BaseModel, Field
import numpy as np
class Point(BaseModel):
x: float
y: float
def __mul__(self, r: TransformationMatrix) -> Point:
return Point(
x=self.x * r.a + self.y * r.c + r.e,
y=self.x * r.b + self.y * r.d + r.f
)
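# __mul__ applies an affine transform using the (a, b, c, d, e, f) convention (as in
# SVG/PDF transform matrices): x' = a*x + c*y + e and y' = b*x + d*y + f.
# TransformationMatrix is assumed to expose those six fields, as used above.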
def __truediv__(self, other: float) -> Point:
return Point.construct(x=self.x / other, y=self.y / other)
def __add__(self, other: Point) -> Point:
return Point.construct(
x=self.x + other.x,
y=self.y + other.y,
)
def __sub__(self, other: Point) -> Point:
return Point.construct(
x=self.x - other.x,
y=self.y - other.y,
)
def to_cv(self) -> Tuple[int, int]:
return (int(self.x), int(self.y))
def __lt__(self, other: Point) -> bool:
if self.x < other.x:
return True
if self.x == other.x and self.y < other.y:
return True
return False
def length(self) -> float:
return math.sqrt(self.x * self.x + self.y * self.y)
def angle(self) -> float:
return (180 * math.atan2(self.y, self.x)) / math.pi
def intersect_line_segments(a_left: float, a_right: float, b_left: float, b_right: float) -> Tuple[float, float]:
left = max(a_left, b_left)
right = min(a_right, b_right)
return (left, right) if left <= right else (0, 0)
class Segment(BaseModel):
a: Point
b: Point
def length(self) -> float:
return math.sqrt((self.b.x - self.a.x) ** 2 + (self.b.y - self.a.y) ** 2)
class Box(BaseModel):
min_x: Union[int, float]
min_y: Union[int, float]
max_x: Union[int, float]
max_y: Union[int, float]
def __contains__(self, other: Box) -> bool:
return self.min_x <= other.min_x and other.min_x <= self.max_x \
and self.min_x <= other.max_x and other.max_x <= self.max_x \
and self.min_y <= other.max_y and other.max_y <= self.max_y \
and self.min_y <= other.min_y and other.min_y <= self.max_y
def as_int(self) -> Box:
return Box.construct(
min_x=int(self.min_x),
min_y=int(self.min_y),
max_x=int(self.max_x),
max_y=int(self.max_y),
)
def width(self) -> float:
return self.max_x - self.min_x
def height(self) -> float:
return self.max_y - self.min_y
def largest_side(self) -> float:
return max(self.width(), self.height())
def min_point(self) -> Point:
return Point.construct(
x=self.min_x,
y=self.min_y,
)
def max_point(self) -> Point:
return Point.construct(
x=self.max_x,
y=self.max_y,
)
def center(self) -> Point:
return Point.construct(
x = (self.min_x + self.max_x) / 2,
y = (self.min_y + self.max_y) / 2,
)
def extract_from(self, img: np.array) -> np.array:
if len(img.shape) == 2:
return img[max(0, int(self.min_y)): int(self.max_y), max(0, int(self.min_x)): int(self.max_x)]
return img[max(0, int(self.min_y)): int(self.max_y), max(0, int(self.min_x)): int(self.max_x), :]
@staticmethod
def from_array(box: np.array) -> Box:
return Box.construct(
min_x=box[0],
min_y=box[1],
max_x=box[2],
max_y=box[3],
)
@staticmethod
def from_cv_array(bb: np.array) -> Box:
return Box.construct(
min_x=bb[0],
min_y=bb[1],
max_x=bb[0] + bb[2],
max_y=bb[1] + bb[3],
)
def to_array(self) -> np.array:
return np.array([int(self.min_x), int(self.min_y), int(self.max_x), int(self.max_y)])
# def to_array(self) -> np.array:
# return np.array([int(self.min_y), int(self.min_x), int(self.max_y), int(self.max_x)])
def area(self) -> float:
return (self.max_x - self.min_x) * (self.max_y - self.min_y)
def intersect(self, other: Box) -> Box:
min_x, max_x = intersect_line_segments(
self.min_x, self.max_x, other.min_x, other.max_x)
min_y, max_y = intersect_line_segments(
self.min_y, self.max_y, other.min_y, other.max_y)
box = Box.construct(
min_x=min_x,
min_y=min_y,
max_x=max_x,
max_y=max_y,
)
return box if box.area() > 0 else EMPTY_BOX
def intersection_over_union(self, other: Box) -> float:
intersection = self.intersect(other)
union_area = self.area() + other.area() - intersection.area()
if union_area < 1e-9:
return 0
return intersection.area() / float(union_area)
def intersection_over_self_area(self, other: Box) -> float:
intersection = self.intersect(other)
area = self.area()
if area < 1e-9:
return 0
return intersection.area() / float(area)
def __mul__(self, tm: TransformationMatrix) -> Box:
a = Point.construct(x=self.min_x, y=self.min_y) * tm
b = Point.construct(x=self.max_x, y=self.max_y) * tm
return Box.construct(
min_x=min(a.x, b.x),
min_y=min(a.y, b.y),
max_x=max(a.x, b.x),
max_y=max(a.y, b.y),
)
def extend_relative(self, relative_increase: float) -> Box:
w = self.width()
h = self.height()
dx = (w * relative_increase) / 2
dy = (h * relative_increase) / 2
return self.extend(dx, dy)
def extend_at_least(self, min_width: float, min_height: float) -> Box:
return self.extend(
dx=max(0, min_width - self.width()) / 2,
dy=max(0, min_height - self.height()) / 2,
)
def extend(self, dx: float = 0, dy: float = 0) -> Box:
return Box(
min_x=self.min_x - dx,
min_y=self.min_y - dy,
max_x=self.max_x + dx,
max_y=self.max_y + dy
)
def clip(self, shape: np.array) -> Box:
return Box(
min_x=max(self.min_x, 0),
min_y=max(self.min_y, 0),
max_x=min(self.max_x, shape[1]),
max_y=min(self.max_y, shape[0]),
)
def make_square(self) -> Box:
if self.width() < self.height():
return self.extend(dx=(self.height() - self.width()) / 2)
return self.extend(dy=(self.width() - self.height()) / 2)
def points_bounding_box(points: List[Point]) -> Box:
return Ring.construct(points=points).bounding_box()
class Ring(BaseModel):
points: List[Point] = Field(default_factory=list)
def __mul__(self, tm: TransformationMatrix) -> Ring:
ring = Ring.construct(points=[])
for point in self.points:
ring.points.append(point * tm)
return ring
def segments(self) -> List[Segment]:
i = 0
j = len(self.points) - 1
result = []
while i < len(self.points):
result.append(Segment.construct(
a=self.points[j],
b=self.points[i]
))
j = i
i += 1
return result
def bounding_box(self) -> Box:
if not self.points:
return Box.construct(min_x=0, min_y=0, max_x=0, max_y=0)
min_x = self.points[0].x
min_y = self.points[0].y
max_x = self.points[0].x
max_y = self.points[0].y
for p in self.points[1:]:
min_x = min(min_x, p.x)
min_y = min(min_y, p.y)
max_x = max(max_x, p.x)
max_y = max(max_y, p.y)
return Box.construct(min_x=min_x, min_y=min_y, max_x=max_x, max_y=max_y)
def perimeter(self) -> float:
return sum(s.length() for s in self.segments())
def signed_area(self) -> float:
result = 0.0
for s in self.segments():
result += (s.a.x + s.b.x) * (s.b.y - s.a.y)
return result / 2
def area(self) -> float:
return abs(self.signed_area())
class Polygon(BaseModel):
exterior: Ring = Field(default_factory=Ring)
interior: List[Ring] = Field(default_factory=list)
class TransformationMatrix(BaseModel):
a: float = 1
b: float = 0
c: float = 0
d: float = 1
e: float = 0
f: float = 0
def __mul__(self, other: TransformationMatrix) -> TransformationMatrix:
return TransformationMatrix.construct(
a=self.a * other.a + self.b * other.c,
b=self.a * other.b + self.b * other.d,
c=self.c * other.a + self.d * other.c,
d=self.c * other.b + self.d * other.d,
e=self.e * other.a + self.f * other.c + other.e,
f=self.e * other.b + self.f * other.d + other.f,
)
def inverse(self) -> TransformationMatrix:
det = self.a * self.d - self.b * self.c
return TransformationMatrix.construct(
a=self.d / det,
b=-self.b / det,
c=-self.c / det,
d=self.a / det,
e=(self.c * self.f - self.d * self.e) / det,
f=(self.b * self.e - self.a * self.f) / det,
)
def vector_length(x: float, y: float) -> float:
return math.sqrt(x**2 + y**2)
def tm_rotate(x: float, y: float) -> TransformationMatrix:
length = vector_length(x, y)
cos = x / length
sin = y / length
return TransformationMatrix.construct(
a=cos,
b=-sin,
c=sin,
d=cos,
e=0,
f=0,
)
def rm_rotate_angle_degrees(angle: float) -> TransformationMatrix:
angle = (angle * math.pi) / 180
return TransformationMatrix.construct(
a=math.cos(angle),
b=-math.sin(angle),
c=math.sin(angle),
d=math.cos(angle),
e=0,
f=0,
)
def tm_translate(dx: float, dy: float) -> TransformationMatrix:
return TransformationMatrix.construct(
a=1,
b=0,
c=0,
d=1,
e=dx,
f=dy,
)
def tm_scale(x_scale: float, y_scale: float) -> TransformationMatrix:
return TransformationMatrix.construct(
a=x_scale,
b=0,
c=0,
d=y_scale,
e=0,
f=0
)
def tm_flip_vertical() -> TransformationMatrix:
return TransformationMatrix.construct(
a=1,
b=0,
c=0,
d=-1,
e=0,
f=0
)
def tm_flip_horizontal() -> TransformationMatrix:
return TransformationMatrix.construct(a=-1, b=0, c=0, d=1, e=0, f=0)
def tm_box(from_box: Box, to_box: Box) -> TransformationMatrix:
return tm_translate(
dx= - from_box.min_x,
dy= - from_box.min_y,
) * scale_transform(
from_width=from_box.width(),
from_height=from_box.height(),
to_width=to_box.width(),
to_height=to_box.height(),
) * tm_translate(
dx= to_box.min_x,
dy= to_box.min_y,
)
def scale_transform(from_width: float, from_height: float, to_width: float, to_height: float) -> TransformationMatrix:
return tm_scale(to_width / float(from_width), to_height / float(from_height))
EMPTY_BOX = Box.construct(min_x=0, min_y=0, max_x=0, max_y=0)
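# Illustrative usage of the transform helpers above (not part of the original
# module; a minimal sketch assuming the row-vector convention used by
# Point.__mul__, i.e. transforms compose left to right):
def _example_box_transform() -> Box:
    # map the unit square onto a 100x100 pixel viewport
    src = Box.construct(min_x=0, min_y=0, max_x=1, max_y=1)
    dst = Box.construct(min_x=0, min_y=0, max_x=100, max_y=100)
    tm = tm_box(src, dst)
    # Point(0.5, 0.5) lands in the middle of the viewport: (50.0, 50.0)
    center = Point.construct(x=0.5, y=0.5) * tm
    assert (center.x, center.y) == (50.0, 50.0)
    return src * tm  # the whole box maps onto dst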
_INSIDE = 0
_LEFT = 1
_RIGHT = 2
_BOTTOM = 4
_TOP = 8
def _compute_out_code(x: float, y: float, r: np.array) -> int:
code = _INSIDE
if x < r[0]: # to the left of clip window
code |= _LEFT
elif x > r[2]: # to the right of clip window
code |= _RIGHT
if y < r[1]: # below the clip window
code |= _BOTTOM
elif y > r[3]: # above the clip window
code |= _TOP
return code
def cohen_sutherland_line_clip(s: np.array, r: np.array) -> Tuple[np.array, bool]:
outcode0 = _compute_out_code(s[0], s[1], r)
outcode1 = _compute_out_code(s[2], s[3], r)
x0, y0, x1, y1 = s
xmin, ymin, xmax, ymax = r
accept = False
while True:
if not (outcode0 | outcode1):
# bitwise OR is 0: both points inside window trivially accept and exit loop
accept = True
break
elif outcode0 & outcode1:
# bitwise AND is not 0: both points share an outside zone (LEFT, RIGHT, TOP,
# or BOTTOM), so both must be outside window exit loop (accept is false)
break
else:
# failed both tests, so calculate the line
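            # --- continuation sketch: the remainder of this function was cut off in
            # the source; the following assumes the standard Cohen-Sutherland step
            # (pick an endpoint that lies outside, intersect the segment with the
            # clip edge named by its outcode, replace that endpoint, re-test) ---
            outcode_out = outcode1 if outcode1 > outcode0 else outcode0
            if outcode_out & _TOP:
                x = x0 + (x1 - x0) * (ymax - y0) / (y1 - y0)
                y = ymax
            elif outcode_out & _BOTTOM:
                x = x0 + (x1 - x0) * (ymin - y0) / (y1 - y0)
                y = ymin
            elif outcode_out & _RIGHT:
                y = y0 + (y1 - y0) * (xmax - x0) / (x1 - x0)
                x = xmax
            else:  # _LEFT
                y = y0 + (y1 - y0) * (xmin - x0) / (x1 - x0)
                x = xmin
            if outcode_out == outcode0:
                x0, y0 = x, y
                outcode0 = _compute_out_code(x0, y0, r)
            else:
                x1, y1 = x, y
                outcode1 = _compute_out_code(x1, y1, r)
    return np.array([x0, y0, x1, y1]), accept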
# Lab2/Step3.py
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 17:58:26 2020
@author: <NAME>
"""
import pymongo as pm
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, DayLocator
import numpy as np
import pandas as pd
import warnings
import statsmodels.api as sm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import r2_score
from statsmodels.tsa.arima_model import ARIMA
import seaborn as sb
warnings.filterwarnings('ignore')
#%% Connection to the server
client = pm.MongoClient('bigdatadb.polito.it',
ssl=True,
authSource = 'carsharing',
tlsAllowInvalidCertificates=True)
db = client['carsharing'] #Choose the DB to use
db.authenticate('ictts', 'Ictts16!')#, mechanism='MONGODB-CR') #authentication
#%%Extraction of times series from MongoDB server
start = datetime(2017,1,7,0,0,0)
end = datetime(2017,2,5,23,59,59)
cars_per_hour_filtered_list=[]
cities = ["Milano", "Calgary", "Amsterdam"]
for c in cities:
cars_per_hour_filtered = db.get_collection("PermanentBookings").aggregate(
[
{ "$match" : {"$and": [ { "city": c },
{ "init_date": { "$gte": start } },
{ "final_date": { "$lte": end } },
]
}
},
{ "$project": {
"_id": 1,
"city": 1,
"moved": { "$ne": [
{"$arrayElemAt": [ "$origin_destination.coordinates", 0]},
{"$arrayElemAt": [ "$origin_destination.coordinates", 1]}
]
},
"duration": { "$divide": [ { "$subtract": ["$final_time",
"$init_time"] }, 60 ] },
"date_parts": { "$dateToParts": { "date": "$init_date" } },
}
},
{ "$match" : { "$and": [ { "duration": { "$gte": 3 } },
{ "duration": { "$lte": 180 } },
{ "moved": True }
]
}
},
{ "$group": {
"_id": {
"day": "$date_parts.day",
"month":"$date_parts.month",
"hour": "$date_parts.hour"
},
"tot_rentals": {"$sum": 1}
}
},
{ "$sort": {"_id": 1} }
]
)
cars_per_hour_filtered_list.append(list(cars_per_hour_filtered))
cars_per_hour_Milano = cars_per_hour_filtered_list[0]
cars_per_hour_Calgary = cars_per_hour_filtered_list[1]
cars_per_hour_Amsterdam = cars_per_hour_filtered_list[2]
#%%check missing data for Calgary
days=[]
rentals_Calgary=[]
for i in range(len(cars_per_hour_Calgary)):
days.append(datetime(2017,cars_per_hour_Calgary[i]['_id']['month'],
cars_per_hour_Calgary[i]['_id']['day'],
cars_per_hour_Calgary[i]['_id']['hour']))
rentals_Calgary.append(cars_per_hour_Calgary[i]['tot_rentals'])
Calgary=pd.DataFrame({'days':days,'rentals':rentals_Calgary},index=days)
missing_Calgary=pd.date_range(start = '2017-01-07', end = '2017-02-05',
freq='H' ).difference(Calgary.index) #find missing hours
Calgary=Calgary.sort_index()
#add missing data for Calgary
Nr=missing_Calgary.shape[0]
if Nr>0:
missing_data=pd.DataFrame({'days':missing_Calgary,'rentals':np.full(Nr,-1)},
index=missing_Calgary.values)#add -1 as temp value
Calgary=Calgary.append(missing_data)#add the missing rows to the main DataFrame
Calgary=Calgary.sort_index()#sort rows to put the new rows in the right place
index=np.argwhere((Calgary['rentals'].values==-1)) #find indices where rentals are -1
Calgary['rentals'].values[index]=(Calgary['rentals'].values[index-1]+
Calgary['rentals'].values[index+1])//2 #use mean values
#%%check missing data for Milano
days=[]
rentals_Milano=[]
for i in range(len(cars_per_hour_Milano)):
days.append(datetime(2017,cars_per_hour_Milano[i]['_id']['month'],
cars_per_hour_Milano[i]['_id']['day'],
cars_per_hour_Milano[i]['_id']['hour']))
rentals_Milano.append(cars_per_hour_Milano[i]['tot_rentals'])
Milano=pd.DataFrame({'days':days,'rentals':rentals_Milano},index=days)
missing_Milano=pd.date_range(start = '2017-01-07', end = '2017-02-05',
freq='H' ).difference(Milano.index)
Milano=Milano.sort_index()
#add missing data for Milano
Nr=missing_Milano.shape[0]
if Nr>0:
missing_data=pd.DataFrame({'days':missing_Milano,'rentals':np.full(Nr,-1)},
index=missing_Milano.values)#add -1 as temp value
Milano=Milano.append(missing_data)#add the missing rows to the main DataFrame
Milano=Milano.sort_index()#sort rows to put the new rows in the right place
index=np.argwhere((Milano['rentals'].values==-1)) #find indices where rentals are -1
Milano['rentals'].values[index]=(Milano['rentals'].values[index-1]+
Milano['rentals'].values[index+1])//2 #use mean values
#%%check missing data for Amsterdam
days=[]
rentals_Amsterdam=[]
for i in range(len(cars_per_hour_Amsterdam)):
days.append(datetime(2017,cars_per_hour_Amsterdam[i]['_id']['month'],
cars_per_hour_Amsterdam[i]['_id']['day'],
cars_per_hour_Amsterdam[i]['_id']['hour']))
rentals_Amsterdam.append(cars_per_hour_Amsterdam[i]['tot_rentals'])
Amsterdam=pd.DataFrame({'days':days,'rentals':rentals_Amsterdam},index=days)
missing_Amsterdam=pd.date_range(start = '2017-01-07', end = '2017-02-05',
freq='H' ).difference(Amsterdam.index)
Amsterdam=Amsterdam.sort_index()
#add missing data for Amsterdam
Nr=missing_Amsterdam.shape[0]
if Nr>0:
missing_data=pd.DataFrame({'days':missing_Amsterdam,'rentals':np.full(Nr,-1)},
index=missing_Amsterdam.values)#add -1 as temp value
Amsterdam=Amsterdam.append(missing_data)#add the missing rows to the main DataFrame
Amsterdam=Amsterdam.sort_index()#sort rows to put the new rows in the right place
index=np.argwhere((Amsterdam['rentals'].values==-1)) #find indices where rentals are -1
Amsterdam['rentals'].values[index]=(Amsterdam['rentals'].values[index-1]+
Amsterdam['rentals'].values[index+1])//2 #use mean values
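#%% Possible refactor (not in the original script): the three per-city blocks
# above repeat the same gap-filling logic; a helper like this sketch could
# replace them, using pandas reindex/interpolate instead of the manual -1 trick.
def fill_missing_hours(df, start='2017-01-07', end='2017-02-05'):
    full_range = pd.date_range(start=start, end=end, freq='H')
    df = df.sort_index().reindex(full_range)              # missing hours become NaN
    df['rentals'] = df['rentals'].interpolate().round()   # fill gaps from neighbours
    df['days'] = df.index
    return df
# e.g. Calgary = fill_missing_hours(Calgary)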
Calgary.to_csv('calgary.csv', index=False)
Milano.to_csv('milano.csv', index=False)
Amsterdam.to_csv('amsterdam.csv', index=False)
Calgary = pd.read_csv('calgary.csv',sep=',',parse_dates=[0],index_col=0)
Milano = pd.read_csv('milano.csv',sep=',',parse_dates=[0],index_col=0)
Amsterdam = pd.read_csv('amsterdam.csv',sep=',',parse_dates=[0],index_col=0)
#%% CHECK OF STATIONARITY
fig, ax = plt.subplots()
ax.plot(Calgary.index.values,Calgary["rentals"].values,label='real rentals')
rolling_mean_calgary=Calgary['rentals'].rolling(168).mean()# 168 is 24*1week
rolling_std_calgary=Calgary['rentals'].rolling(168).std()# 168 is 24*1week
ax.plot(rolling_mean_calgary,label='rolling mean')
ax.plot(rolling_std_calgary,label='rolling std')
ax.legend(ncol=1,fontsize='small')
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.title('Rolling statistics Calgary (1 week sliding window)')
plt.show()
fig, ax = plt.subplots()
ax.plot(Milano.index.values,Milano["rentals"].values,label='real rentals')
rolling_mean_milano=Milano['rentals'].rolling(168).mean()
rolling_std_milano=Milano['rentals'].rolling(168).std()# 168 is 24*1week
ax.plot(rolling_mean_milano,label='rolling mean')
ax.plot(rolling_std_milano,label='rolling std')
ax.legend(ncol=1,fontsize='small')
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.title('Rolling statistics Milano (1 week sliding window)')
plt.show()
fig, ax = plt.subplots()
ax.plot(Amsterdam.index.values,Amsterdam["rentals"].values,label='real rentals')
rolling_mean_amsterdam=Amsterdam['rentals'].rolling(168).mean()
rolling_std_amsterdam=Amsterdam['rentals'].rolling(168).std()# 168 is 24*1week
ax.plot(rolling_mean_amsterdam,label='rolling mean')
ax.plot(rolling_std_amsterdam,label='rolling std')
ax.legend(ncol=1,fontsize='small')
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.title('Rolling statistics Amsterdam (1 week sliding window)')
plt.show()
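#%% Optional formal stationarity check (not in the original script): a sketch
# using the Augmented Dickey-Fuller test from statsmodels to complement the
# rolling-statistics plots above; a small p-value suggests stationarity.
from statsmodels.tsa.stattools import adfuller
for city_name, city_df in [('Calgary', Calgary), ('Milano', Milano), ('Amsterdam', Amsterdam)]:
    adf_stat, p_value = adfuller(city_df['rentals'].values)[:2]
    print('%s: ADF statistic = %.3f, p-value = %.3f' % (city_name, adf_stat, p_value))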
#%% ACF AND PACF
#Plot ACF and PACF for Amsterdam
sm.graphics.tsa.plot_acf(Amsterdam.rentals.values, lags=40,
title='Autocorrelation for Amsterdam')
sm.graphics.tsa.plot_pacf(Amsterdam.rentals.values, lags=40,
title='Partial Autocorrelation for Amsterdam')
plt.show()
#Plot ACF and PACF for Calgary
sm.graphics.tsa.plot_acf(Calgary.rentals.values, lags=40,
title='Autocorrelation for Calgary')
sm.graphics.tsa.plot_pacf(Calgary.rentals.values, lags=40,
title='Partial Autocorrelation for Calgary')
plt.show()
#Plot ACF and PACF for Milano
sm.graphics.tsa.plot_acf(Milano.rentals.values, lags=40,
title='Autocorrelation for Milano')
sm.graphics.tsa.plot_pacf(Milano.rentals.values, lags=40,
title='Partial Autocorrelation for Milano')
plt.show()
#%% FIT THE MODEL without division in test and training
#Amsterdam
model = ARIMA(Amsterdam.astype(float), order=(2,0,4))
model_fit = model.fit(disp=0,method='css')
print(model_fit.summary())
fig, ax = plt.subplots()
ax.plot(Amsterdam.index.values,Amsterdam.rentals.values,label='real rentals')
ax.plot(model_fit.fittedvalues, color='red', label='predicted values')
ax.legend(ncol=1,fontsize='small')
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.title('Real rentals vs Predicted Values with ARIMA(2,0,4) for Amsterdam')
plt.show()
#Calgary
model = ARIMA(Calgary.astype(float), order=(2,0,5))
model_fit = model.fit(disp=0,method='css')
print(model_fit.summary())
fig, ax = plt.subplots()
ax.plot(Calgary.index.values,Calgary.rentals.values,label='real rentals')
ax.plot(model_fit.fittedvalues, color='red', label='predicted values')
ax.legend(ncol=1,fontsize='small')
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.title('Real rentals vs Predicted Values with ARIMA(2,0,5) for Calgary')
plt.show()
#Milano
model = ARIMA(Milano.astype(float), order=(2,0,4))
model_fit = model.fit(disp=0,method='css')
print(model_fit.summary())
fig, ax = plt.subplots()
ax.plot(Milano.index.values,Milano.rentals.values,label='real rentals')
ax.plot(model_fit.fittedvalues, color='red', label='predicted values')
ax.legend(ncol=1,fontsize='small')
fig.autofmt_xdate()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.title('Real rentals vs Predicted Values with ARIMA(2,0,4) for Milano')
plt.show()
#%%Fit the model with division in training and test
#Amsterdam
X_am = Amsterdam.values.astype(float)
tr_size_am = 504 #size train (504h=3 week)
ts_size_am = 72 #size test (72h = 3 days)
train_am, test_am = X_am[0:tr_size_am], X_am[tr_size_am:(tr_size_am+ts_size_am)]
history = [x for x in train_am]
predictions=[]
for t in range(0, ts_size_am):#for each hour I do arima model
model = ARIMA(history, order=(3,0,4))
model_fit = model.fit(disp=0,method='css')
output = model_fit.forecast() #get all the forecast
yhat = output[0] #first forecast
predictions.append(yhat)
obs = test_am[t]
history.append(obs)
plt.figure()
plt.plot(test_am, color='black',label='Original')
plt.plot(predictions, label = 'Prediction')
plt.xlabel('Test Hours')
plt.ylabel('Rentals')
plt.title('Amsterdam: ARIMA(3,0,4) with expanding window')
plt.legend()
plt.show()
print('Amsterdam: (3,0,4) model => MAE: %.3f -- MSE: %.3f -- MAPE: %.3f -- R2: %.3f' %(
mean_absolute_error(test_am, predictions),
mean_squared_error(test_am, predictions),
mean_absolute_percentage_error(test_am, predictions),
r2_score(test_am, predictions)))
#Calgary
X_ca = Calgary.values.astype(float)
tr_size_ca = 504 #size train (504h=3 week)
ts_size_ca = 72 #size test (72h = 3 days)
train_ca, test_ca = X_ca[0:tr_size_ca], X_ca[tr_size_ca:(tr_size_ca+ts_size_ca)]
history = [x for x in train_ca]
predictions=[]
for t in range(0, ts_size_ca):#for each hour I do arima model
model = ARIMA(history, order=(2,0,5))
model_fit = model.fit(disp=0,method='css')
output = model_fit.forecast() #get all the forecast
yhat = output[0] #first forecast
predictions.append(yhat)
obs = test_ca[t]
history.append(obs)
plt.figure()
plt.plot(test_ca, color='black',label='Original')
plt.plot(predictions, label = 'Prediction')
plt.xlabel('Test Hours')
plt.ylabel('Rentals')
plt.title('Calgary: ARIMA(2,0,5) with expanding window')
plt.legend()
plt.show()
print('Calgary: (2,0,5) model => MAE: %.3f -- MSE: %.3f -- MAPE: %.3f -- R2: %.3f' %(
mean_absolute_error(test_ca, predictions),
mean_squared_error(test_ca, predictions),
mean_absolute_percentage_error(test_ca, predictions),
r2_score(test_ca, predictions)))
#Milano
X_mi = Milano.values.astype(float)
tr_size_mi = 504 #size train (504h=3 week)
ts_size_mi = 72 #size test (72h = 3 days)
train_mi, test_mi = X_mi[0:tr_size_mi], X_mi[tr_size_mi:(tr_size_mi+ts_size_mi)]
history = [x for x in train_mi]
predictions=[]
for t in range(0, ts_size_mi):#for each hour I do arima model
model = ARIMA(history, order=(2,0,4))
model_fit = model.fit(disp=0,method='css')
output = model_fit.forecast() #get all the forecast
yhat = output[0] #first forecast
predictions.append(yhat)
obs = test_mi[t]
history.append(obs)
plt.figure()
plt.plot(test_mi, color='black', label='Original')
plt.plot(predictions, label = 'Prediction')
plt.xlabel('Test Hours')
plt.ylabel('Rentals')
plt.title('Milano: ARIMA(2,0,4) with expanding window')
plt.legend()
plt.show()
print('Milano: (2,0,4) model => MAE: %.3f -- MSE: %.3f -- MAPE: %.3f -- R2: %.3f' %(
mean_absolute_error(test_mi, predictions),
mean_squared_error(test_mi, predictions),
mean_absolute_percentage_error(test_mi, predictions),
r2_score(test_mi, predictions)))
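#%% Possible refactor (not in the original script): the three expanding-window
# loops above are identical apart from the data and the ARIMA order; a helper
# like this sketch would remove the duplication.
def expanding_window_forecast(series, order, train_size=504, test_size=72):
    train, test = series[:train_size], series[train_size:train_size + test_size]
    history = list(train)
    preds = []
    for t in range(test_size):
        fit = ARIMA(history, order=order).fit(disp=0, method='css')
        preds.append(fit.forecast()[0])   # one-step-ahead forecast
        history.append(test[t])           # expand the window with the true value
    return test, preds
# e.g. test_mi, predictions = expanding_window_forecast(X_mi, (2, 0, 4))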
#%%Fit the model with a grid search over p and q with train and test fixed
##Amsterdam
p_values = range(0,8) #y axis
q_values = range(0,6) #x axis
X_am = Amsterdam.values.astype(float)
tr_size_am = 504 #size train (504h=3 week)
ts_size_am = 72 #size test (72h = 3 days)
train_am, test_am = X_am[0:tr_size_am], X_am[tr_size_am:(tr_size_am+ts_size_am)]
predictions=np.zeros((len(p_values),len(q_values),ts_size_am))
MAE=np.zeros((len(p_values),len(q_values)))
MSE=np.zeros((len(p_values),len(q_values)))
MAPE=np.zeros((len(p_values),len(q_values)))
R2=np.zeros((len(p_values),len(q_values)))
warnings.filterwarnings('ignore')
for p in p_values:
for q in q_values:
if (p==0 and q==0):
MAE[p_values.index(p)][q_values.index(q)]=np.nan
MSE[p_values.index(p)][q_values.index(q)]=np.nan
MAPE[p_values.index(p)][q_values.index(q)]=np.nan
R2[p_values.index(p)][q_values.index(q)]=np.nan
continue
print('Testing ARIMA order (%i,0,%i)' %(p,q))
history = [x for x in train_am]
flag=0
for t in range(0, ts_size_am):#for each hour I do arima model
model = ARIMA(history, order=(p,0,q))
try:
model_fit = model.fit(disp=0, method='css')
except ValueError:
flag=1
MAE[p_values.index(p)][q_values.index(q)]=np.nan
MSE[p_values.index(p)][q_values.index(q)]=np.nan
MAPE[p_values.index(p)][q_values.index(q)]=np.nan
R2[p_values.index(p)][q_values.index(q)]=np.nan
break
output = model_fit.forecast() #get all the forecast
yhat = output[0] #first forecast
predictions[p_values.index(p)][q_values.index(q)][t]=yhat
obs = test_am[t]
history.append(obs)
if flag==0:
MAE[p_values.index(p)][q_values.index(q)]=mean_absolute_error(
test_am, predictions[p_values.index(p)][q_values.index(q)])
MSE[p_values.index(p)][q_values.index(q)]=mean_squared_error(
test_am, predictions[p_values.index(p)][q_values.index(q)])
MAPE[p_values.index(p)][q_values.index(q)]=mean_absolute_percentage_error(
test_am, predictions[p_values.index(p)][q_values.index(q)])
R2[p_values.index(p)][q_values.index(q)]=r2_score(
test_am, predictions[p_values.index(p)][q_values.index(q)])
plt.figure()
heat_map = sb.heatmap(MAPE,xticklabels=q_values,yticklabels=p_values,annot=True,
cmap='gist_stern')
heat_map.set_title('MAPE: Expanding window for different values of p and q - Amsterdam')
heat_map.set_xlabel('q')
heat_map.set_ylabel('p')
plt.show()
ind=np.argwhere(MAPE == np.nanmin(MAPE))[0]
print('-----Amsterdam-----')
print('p best is: ' + str(p_values[ind[0]]))
print('q best is: ' + str(q_values[ind[1]]))
##Calgary
p_values = range(0,8) #y axis
q_values = range(0,6) #x axis
X_ca = Calgary.values.astype(float)
tr_size_ca = 504 #size train (504h=3 week)
ts_size_ca = 72 #size test (72h = 3 days)
train_ca, test_ca = X_ca[0:tr_size_ca], X_ca[tr_size_ca:(tr_size_ca+ts_size_ca)]
predictions=np.zeros((len(p_values),len(q_values),ts_size_ca))
MAE=np.zeros((len(p_values),len(q_values)))
MSE=np.zeros((len(p_values),len(q_values)))
MAPE=np.zeros((len(p_values),len(q_values)))
R2=np.zeros((len(p_values),len(q_values)))
warnings.filterwarnings('ignore')
for p in p_values:
for q in q_values:
if (p==0 and q==0):
MAE[p_values.index(p)][q_values.index(q)]=np.nan
MSE[p_values.index(p)][q_values.index(q)]=np.nan
MAPE[p_values.index(p)][q_values.index(q)]=np.nan
R2[p_values.index(p)][q_values.index(q)]=np.nan
continue
print('Testing ARIMA order (%i,0,%i)' %(p,q))
history = [x for x in train_ca]
flag=0
for t in range(0, ts_size_ca):#for each hour I do arima model
model = ARIMA(history, order=(p,0,q))
try:
model_fit = model.fit(disp=0,method='css')
except ValueError:
flag=1
MAE[p_values.index(p)][q_values.index(q)]=np.nan
MSE[p_values.index(p)][q_values.index(q)]=np.nan
MAPE[p_values.index(p)][q_values.index(q)]=np.nan
R2[p_values.index(p)][q_values.index(q)]=np.nan
break
output = model_fit.forecast() #get all the forecast
yhat = output[0] #first forecast
predictions[p_values.index(p)][q_values.index(q)][t]=yhat
obs = test_ca[t]
history.append(obs)
if flag==0:
MAE[p_values.index(p)][q_values.index(q)]=mean_absolute_error(
test_ca, predictions[p_values.index(p)][q_values.index(q)])
MSE[p_values.index(p)][q_values.index(q)]=mean_squared_error(
test_ca, predictions[p_values.index(p)][q_values.index(q)])
MAPE[p_values.index(p)][q_values.index(q)]=mean_absolute_percentage_error(
test_ca, predictions[p_values.index(p)][q_values.index(q)])
R2[p_values.index(p)][q_values.index(q)]=r2_score(
test_ca, predictions[p_values.index(p)][q_values.index(q)])
plt.figure()
heat_map = sb.heatmap(MAPE,xticklabels=q_values,yticklabels=p_values,annot=True,
cmap='gist_stern')
heat_map.set_title('MAPE: Expanding window for different values of p and q - Calgary')
heat_map.set_xlabel('q')
heat_map.set_ylabel('p')
plt.show()
ind=np.argwhere(MAPE == np.nanmin(MAPE))[0]
print('-----Calgary-----')
print('p best is: ' + str(p_values[ind[0]]))
print('q best is: ' + str(q_values[ind[1]]))
##Milano
p_values = range(0,8) #y axis
q_values = range(0,6) #x axis
X_mi = Milano.values.astype(float)
tr_size_mi = 504 #size train (504h=3 week)
ts_size_mi = 72 #size test (72h = 3 days)
train_mi, test_mi = X_mi[0:tr_size_mi], X_mi[tr_size_mi:(tr_size_mi+ts_size_mi)]
predictions=np.zeros((len(p_values),len(q_values),ts_size_mi))
MAE=np.zeros((len(p_values),len(q_values)))
MSE=np.zeros((len(p_values),len(q_values)))
MAPE=np.zeros((len(p_values),len(q_values)))
R2=np.zeros((len(p_values),len(q_values)))
warnings.filterwarnings('ignore')
for p in p_values:
for q in q_values:
if (p==0 and q==0):
MAE[p_values.index(p)][q_values.index(q)]=np.nan
MSE[p_values.index(p)][q_values.index(q)]=np.nan
MAPE[p_values.index(p)][q_values.index(q)]=np.nan
R2[p_values.index(p)][q_values.index(q)]=np.nan
continue
print('Testing ARIMA order (%i,0,%i)' %(p,q))
history = [x for x in train_mi]
flag=0
for t in range(0, ts_size_mi):#for each hour I do arima model
model = ARIMA(history, order=(p,0,q))
try:
model_fit = model.fit(disp=0,method='css')
except ValueError:
flag=1
MAE[p_values.index(p)][q_values.index(q)]=np.nan
MSE[p_values.index(p)][q_values.index(q)]=np.nan
MAPE[p_values.index(p)][q_values.index(q)]=np.nan
R2[p_values.index(p)][q_values.index(q)]=np.nan
break
output = model_fit.forecast() #get all the forecast
For the delay cal_type the gain is calculated as:
gain = 1 * exp((+/-) * 2 * pi * j * delay * frequency)
where the (+/-) is dictated by the delay_convention
Args:
delay_convention: exponent sign to use in the conversion. Defaults to minus.
run_check: Option to check for the existence and proper shapes of
parameters after converting this object. Default is True.
check_extra: Option to check shapes and types of optional parameters
as well as required ones. Default is True.
run_check_acceptability: Option to check acceptable range of the values of
parameters after converting this object. Default is True.
"""
if self.cal_type == 'gain':
raise ValueError('The data is already a gain cal_type.')
elif self.cal_type == 'delay':
if delay_convention == 'minus':
conv = -1
elif delay_convention == 'plus':
conv = 1
else:
raise ValueError('delay_convention can only be "minus" or "plus"')
self.history += ' Converted from delays to gains using pyuvdata.'
phase_array = np.zeros((self.Nants_data, self.Nspws, self.Nfreqs, self.Ntimes, self.Njones))
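# Shape bookkeeping for the loop below (inferred from the indexing): per spectral
# window, delay_array[:, si, 0, :, :, np.newaxis] is (Nants, Ntimes, Njones, 1) and
# freq_array[si, np.newaxis, :] is (1, Nfreqs), so the dot product has shape
# (Nants, Ntimes, Njones, Nfreqs); the transpose reorders it to
# (Nants, Nfreqs, Ntimes, Njones) to match phase_array[:, si, :, :, :].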
for si in range(self.Nspws):
temp = conv * 2 * np.pi * np.dot(self.delay_array[:, si, 0, :, :, np.newaxis],
self.freq_array[si, np.newaxis, :])
temp = np.transpose(temp, (0, 3, 1, 2))
phase_array[:, si, :, :, :] = temp
gain_array = np.exp(1j * phase_array)
new_quality = np.repeat(self.quality_array[:, :, :, :, :], self.Nfreqs, axis=2)
self.set_gain()
self.gain_array = gain_array
self.quality_array = new_quality
self.delay_array = None
if self.total_quality_array is not None:
new_total_quality_array = np.repeat(self.total_quality_array[:, :, :, :], self.Nfreqs, axis=1)
self.total_quality_array = new_total_quality_array
# check if object is self-consistent
if run_check:
self.check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
else:
raise ValueError('cal_type is unknown, cannot convert to gain')
def get_gains(self, ant, jpol=None):
"""
Get the gain associated with an antenna and/or polarization.
Parameters
----------
ant : int
Antenna integer to request
jpol : int or str, optional
Instrumental polarization to request. Ex. 'Jxx'
Returns
-------
complex ndarray
Gain solution of shape (Nfreqs, Ntimes) or
(Nfreqs, Ntimes, Npol) if Njones > 1 and jpol is not fed.
"""
if self.cal_type != 'gain':
raise ValueError("cal_type must be 'gain' for get_gains() method")
return self._slice_array(self._parse_key(ant, jpol=jpol), self.gain_array)
def get_flags(self, ant, jpol=None):
"""
Get the flags associated with an antenna and/or polarization.
Parameters
----------
ant : int
Antenna integer to request
jpol : int or str, optional
Instrumental polarization to request. Ex. 'Jxx'
Returns
-------
boolean ndarray
Flags of shape (Nfreqs, Ntimes) or
(Nfreqs, Ntimes, Npol) if Njones > 1 and jpol is not fed.
"""
return self._slice_array(self._parse_key(ant, jpol=jpol), self.flag_array)
def get_quality(self, ant, jpol=None):
"""
Get the qualities associated with an antenna and/or polarization.
Parameters
----------
ant : int
Antenna integer to request
jpol : int or str, optional
Instrumental polarization to request. Ex. 'Jxx'
Returns
-------
float ndarray
Qualities of shape (Nfreqs, Ntimes) or
(Nfreqs, Ntimes, Npol) if Njones > 1 and jpol is not fed.
"""
return self._slice_array(self._parse_key(ant, jpol=jpol), self.quality_array)
def ant2ind(self, antnum):
"""
Given antenna number return its index in data arrays
Parameters
----------
antnum : int
Antenna number
Returns
-------
int
Index in data arrays
"""
if not self._has_key(antnum=antnum):
raise ValueError("{} not found in ant_array".format(antnum))
return np.argmin(np.abs(self.ant_array - antnum))
def jpol2ind(self, jpol):
"""
Given a jones polarization, return its index in data arrays
Parameters
----------
jpol : int or str
Jones polarization
Returns
-------
int
Index in data arrays
"""
if isinstance(jpol, str):  # np.str was just an alias for str (removed in newer NumPy)
jpol = uvutils.jstr2num(jpol, x_orientation=self.x_orientation)
if not self._has_key(jpol=jpol):
raise ValueError("{} not found in jones_array".format(jpol))
return np.argmin(np.abs(self.jones_array - jpol))
def _has_key(self, antnum=None, jpol=None):
"""
Check if this UVCal has the requested antenna or polarization
"""
if antnum is not None:
if antnum not in self.ant_array:
return False
if jpol is not None:
if isinstance(jpol, str):
jpol = uvutils.jstr2num(jpol, x_orientation=self.x_orientation)
if jpol not in self.jones_array:
return False
return True
def _slice_array(self, key, data_array, squeeze_pol=True):
"""
Slice a data array given a data key
"""
key = uvutils._get_iterable(key)
if len(key) == 1:
# interpret as a single antenna
output = data_array[self.ant2ind(key[0]), 0, :, :, :]
if squeeze_pol and output.shape[-1] == 1:
output = output[:, :, 0]
return output
elif len(key) == 2:
# interpret as an antenna-pol pair
return data_array[self.ant2ind(key[0]), 0, :, :, self.jpol2ind(key[1])]
def _parse_key(self, ant, jpol=None):
"""
Parse key inputs and return a standard antenna-polarization key
"""
if isinstance(ant, (list, tuple)):
# interpret ant as (ant,) or (ant, jpol)
key = tuple(ant)
elif isinstance(ant, (int, np.integer)):
# interpret ant as antenna number
key = (ant,)
# add jpol if fed
if jpol is not None:
key += (jpol,)
return key
def _convert_from_filetype(self, other):
for p in other:
param = getattr(other, p)
setattr(self, p, param)
def _convert_to_filetype(self, filetype):
if filetype == 'calfits':
from . import calfits
other_obj = calfits.CALFITS()
else:
raise ValueError('filetype must be calfits.')
for p in self:
param = getattr(self, p)
setattr(other_obj, p, param)
return other_obj
def read_calfits(self, filename, run_check=True, check_extra=True,
run_check_acceptability=True, strict_fits=False):
"""
Read in data from a calfits file.
Args:
filename: The calfits file or list of files to read from.
string path, or list or tuple of string paths.
run_check: Option to check for the existence and proper shapes of
parameters after reading in the file. Default is True.
check_extra: Option to check optional parameters as well as required
ones. Default is True.
run_check_acceptability: Option to check acceptable range of the values of
parameters after reading in the file. Default is True.
strict_fits: boolean
If True, require that the data axes have corresponding NAXIS, CRVAL,
CDELT and CRPIX keywords. If False, allow CRPIX to be missing and
set it equal to zero and allow the CRVAL for the spw directions to
be missing and set it to zero. This keyword exists to support old
calfits files that were missing many CRPIX and CRVAL keywords.
Default is False.
"""
from . import calfits
if isinstance(filename, (list, tuple)):
self.read_calfits(filename[0], run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_fits=strict_fits)
if len(filename) > 1:
for f in filename[1:]:
uvcal2 = UVCal()
uvcal2.read_calfits(f, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_fits=strict_fits)
self += uvcal2
del(uvcal2)
else:
calfits_obj = calfits.CALFITS()
calfits_obj.read_calfits(filename, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_fits=strict_fits)
self._convert_from_filetype(calfits_obj)
del(calfits_obj)
def write_calfits(self, filename, run_check=True, check_extra=True,
run_check_acceptability=True, clobber=False):
"""Write data to a calfits file.
Args:
filename: The calfits filename to write to.
run_check: Option to check for the existence and proper shapes of
parameters before writing the file. Default is True.
check_extra: Option to check optional parameters as well as required
ones. Default is True.
run_check_acceptability: Option to check acceptable range of the values of
parameters before writing the file. Default is True.
clobber: Option to overwrite the filename if the file already exists.
Default is False.
"""
calfits_obj = self._convert_to_filetype('calfits')
calfits_obj.write_calfits(filename,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
clobber=clobber)
del(calfits_obj)
def read_fhd_cal(self, cal_file, obs_file, settings_file=None, raw=True,
extra_history=None, run_check=True, check_extra=True,
run_check_acceptability=True):
"""
Read data from an FHD cal.sav file.
Args:
cal_file: The cal.sav file to read from.
obs_file: The obs.sav file to read from.
settings_file: The settings_file to read from. Optional, but very
useful for provenance.
raw: Option to use the raw (per antenna, per frequency) solution or
to use the fitted (polynomial over phase/amplitude) solution.
Default is True (meaning use the raw solutions).
extra_history: Optional string or list of strings to add to the
object's history parameter. Default is None.
run_check: Option to check for the existence and proper shapes of
parameters after reading in the file. Default is True.
check_extra: Option to check optional parameters as well as required
ones. Default is True.
run_check_acceptability: Option to check acceptable range of the values of
parameters after reading in the file. Default is True.
"""
from . import fhd_cal
if isinstance(cal_file, (list, tuple)):
if isinstance(obs_file, (list, tuple)):
if len(obs_file) != len(cal_file):
raise ValueError('Number of obs_files must match number of cal_files')
else:
raise ValueError('Number of obs_files must match number of cal_files')
if settings_file is not None:
if isinstance(settings_file, (list, tuple)):
if len(settings_file) != len(cal_file):
raise ValueError('Number of settings_files must match number of cal_files')
else:
raise ValueError('Number of settings_files must match number of cal_files')
settings_file_use = settings_file[0]
self.read_fhd_cal(cal_file[0], obs_file[0], settings_file=settings_file_use,
raw=raw, extra_history=extra_history,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if len(cal_file) > 1:
self.assertEqual(p1.age, 17)
self.assertEqual(p1.first, 'John')
self.assertEqual(p1.last, 'Doe')
def test_creation_with_some_keywords(self):
Person = NamedTuple('Person', 'age first last')
p1 = Person(17, first='John', last='Doe')
self.assertEqual(p1[0], 17)
self.assertEqual(p1[1], 'John')
self.assertEqual(p1[2], 'Doe')
self.assertEqual(p1.age, 17)
self.assertEqual(p1.first, 'John')
self.assertEqual(p1.last, 'Doe')
p1 = Person(17, last='Doe', first='John')
self.assertEqual(p1[0], 17)
self.assertEqual(p1[1], 'John')
self.assertEqual(p1[2], 'Doe')
self.assertEqual(p1.age, 17)
self.assertEqual(p1.first, 'John')
self.assertEqual(p1.last, 'Doe')
def test_custom_new(self):
class Book(NamedTuple):
title = 0
author = 1
genre = 2
def __new__(cls, string):
args = [s.strip() for s in string.split(';')]
return super(Book, cls).__new__(cls, *tuple(args))
b1 = Book('The Last Mohican; <NAME>; Historical')
self.assertEqual(b1.title, 'The Last Mohican')
self.assertEqual(b1.author, '<NAME>')
self.assertEqual(b1.genre, 'Historical')
def test_defaults_in_class(self):
class Character(NamedTuple):
name = 0
gender = 1, None, 'male'
klass = 2, None, 'fighter'
for char in (
{'name':'<NAME>'},
{'name':'<NAME>', 'klass':'scholar'},
{'name':'<NAME>', 'gender':'female'},
{'name':'<NAME>', 'gender':'female', 'klass':'sorceress'},
):
c = Character(**char)
for name, value in (('name', None), ('gender','male'), ('klass','fighter')):
if name in char:
value = char[name]
self.assertEqual(getattr(c, name), value)
def test_defaults_in_class_that_are_falsey(self):
class Point(NamedTuple):
x = 0, 'horizontal coordinate', 0
y = 1, 'vertical coordinate', 0
p = Point()
self.assertEqual(p.x, 0)
self.assertEqual(p.y, 0)
def test_pickle_namedtuple_with_module(self):
if isinstance(LifeForm, Exception):
raise LifeForm
lf = LifeForm('this', 'that', 'theother')
test_pickle_dump_load(self.assertEqual, lf)
def test_pickle_namedtuple_without_module(self):
if isinstance(DeathForm, Exception):
raise DeathForm
df = DeathForm('sickly green', '2x4', 'foul')
test_pickle_dump_load(self.assertEqual, df)
def test_subclassing(self):
if isinstance(ThatsIt, Exception):
raise ThatsIt
ti = ThatsIt('Henry', 'Weinhardt')
self.assertEqual(ti.blah, 'Henry')
self.assertTrue(ti.what(), 'Henry')
test_pickle_dump_load(self.assertEqual, ti)
def test_contains(self):
Book = NamedTuple('Book', 'title author genre')
b = Book('Teckla', 'Steven Brust', 'fantasy')
self.assertTrue('Teckla' in b)
self.assertTrue('Steven Brust' in b)
self.assertTrue('fantasy' in b)
def test_fixed_size(self):
class Book(NamedTuple):
_size_ = TupleSize.fixed
title = 0
author = 1
genre = 2
b = Book('Teckla', '<NAME>', 'fantasy')
self.assertTrue('Teckla' in b)
self.assertTrue('Steven Brust' in b)
self.assertTrue('fantasy' in b)
self.assertEqual(b.title, 'Teckla')
self.assertEqual(b.author, '<NAME>')
self.assertRaises(TypeError, Book, 'Teckla', '<NAME>')
self.assertRaises(TypeError, Book, 'Teckla')
def test_minimum_size(self):
class Book(NamedTuple):
_size_ = TupleSize.minimum
title = 0
author = 1
b = Book('Teckla', '<NAME>', 'fantasy')
self.assertTrue('Teckla' in b)
self.assertTrue('Steven Brust' in b)
self.assertTrue('fantasy' in b)
self.assertEqual(b.title, 'Teckla')
self.assertEqual(b.author, '<NAME>')
b = Book('Teckla', '<NAME>')
self.assertTrue('Teckla' in b)
self.assertTrue('<NAME>' in b)
self.assertEqual(b.title, 'Teckla')
self.assertEqual(b.author, '<NAME>')
self.assertRaises(TypeError, Book, 'Teckla')
def test_variable_size(self):
class Book(NamedTuple):
_size_ = TupleSize.variable
title = 0
author = 1
genre = 2
b = Book('Teckla', '<NAME>', 'fantasy')
self.assertTrue('Teckla' in b)
self.assertTrue('<NAME>' in b)
self.assertTrue('fantasy' in b)
self.assertEqual(b.title, 'Teckla')
self.assertEqual(b.author, '<NAME>')
self.assertEqual(b.genre, 'fantasy')
b = Book('Teckla', '<NAME>')
self.assertTrue('Teckla' in b)
self.assertTrue('<NAME>' in b)
self.assertEqual(b.title, 'Teckla')
self.assertEqual(b.author, '<NAME>')
self.assertRaises(AttributeError, getattr, b, 'genre')
self.assertRaises(TypeError, Book, title='Teckla', genre='fantasy')
self.assertRaises(TypeError, Book, author='<NAME>')
def test_combining_namedtuples(self):
class Point(NamedTuple):
x = 0, 'horizontal coordinate', 1
y = 1, 'vertical coordinate', -1
class Color(NamedTuple):
r = 0, 'red component', 11
g = 1, 'green component', 29
b = 2, 'blue component', 37
Pixel1 = NamedTuple('Pixel', Point+Color, module=__name__)
class Pixel2(Point, Color):
"a colored dot"
class Pixel3(Point):
r = 2, 'red component', 11
g = 3, 'green component', 29
b = 4, 'blue component', 37
self.assertEqual(Pixel1._fields_, 'x y r g b'.split())
self.assertEqual(Pixel1.x.__doc__, 'horizontal coordinate')
self.assertEqual(Pixel1.x.default, 1)
self.assertEqual(Pixel1.y.__doc__, 'vertical coordinate')
self.assertEqual(Pixel1.y.default, -1)
self.assertEqual(Pixel1.r.__doc__, 'red component')
self.assertEqual(Pixel1.r.default, 11)
self.assertEqual(Pixel1.g.__doc__, 'green component')
self.assertEqual(Pixel1.g.default, 29)
self.assertEqual(Pixel1.b.__doc__, 'blue component')
self.assertEqual(Pixel1.b.default, 37)
self.assertEqual(Pixel2._fields_, 'x y r g b'.split())
self.assertEqual(Pixel2.x.__doc__, 'horizontal coordinate')
self.assertEqual(Pixel2.x.default, 1)
self.assertEqual(Pixel2.y.__doc__, 'vertical coordinate')
self.assertEqual(Pixel2.y.default, -1)
self.assertEqual(Pixel2.r.__doc__, 'red component')
self.assertEqual(Pixel2.r.default, 11)
self.assertEqual(Pixel2.g.__doc__, 'green component')
self.assertEqual(Pixel2.g.default, 29)
self.assertEqual(Pixel2.b.__doc__, 'blue component')
self.assertEqual(Pixel2.b.default, 37)
self.assertEqual(Pixel3._fields_, 'x y r g b'.split())
self.assertEqual(Pixel3.x.__doc__, 'horizontal coordinate')
self.assertEqual(Pixel3.x.default, 1)
self.assertEqual(Pixel3.y.__doc__, 'vertical coordinate')
self.assertEqual(Pixel3.y.default, -1)
self.assertEqual(Pixel3.r.__doc__, 'red component')
self.assertEqual(Pixel3.r.default, 11)
self.assertEqual(Pixel3.g.__doc__, 'green component')
self.assertEqual(Pixel3.g.default, 29)
self.assertEqual(Pixel3.b.__doc__, 'blue component')
self.assertEqual(Pixel3.b.default, 37)
def test_function_api_type(self):
class Tester(NamedTuple):
def howdy(self):
return 'backwards', list(reversed(self))
Testee = NamedTuple('Testee', 'a c e', type=Tester)
t = Testee(1, 2, 3)
self.assertEqual(t.howdy(), ('backwards', [3, 2, 1]))
def test_asdict(self):
class Point(NamedTuple):
x = 0, 'horizontal coordinate', 1
y = 1, 'vertical coordinate', -1
class Color(NamedTuple):
r = 0, 'red component', 11
g = 1, 'green component', 29
b = 2, 'blue component', 37
Pixel = NamedTuple('Pixel', Point+Color, module=__name__)
pixel = Pixel(99, -101, 255, 128, 0)
self.assertEqual(pixel._asdict(), {'x':99, 'y':-101, 'r':255, 'g':128, 'b':0})
def test_make(self):
class Point(NamedTuple):
x = 0, 'horizontal coordinate', 1
y = 1, 'vertical coordinate', -1
self.assertEqual(Point(4, 5), (4, 5))
self.assertEqual(Point._make((4, 5)), (4, 5))
def test_replace(self):
class Color(NamedTuple):
r = 0, 'red component', 11
g = 1, 'green component', 29
b = 2, 'blue component', 37
purple = Color(127, 0, 127)
mid_gray = purple._replace(g=127)
self.assertEqual(mid_gray, (127, 127, 127))
class TestNamedConstant(TestCase):
def test_constantness(self):
class K(NamedConstant):
PI = 3.141596
TAU = 2 * PI
self.assertEqual(K.PI, 3.141596)
self.assertEqual(K.TAU, 2 * K.PI)
with self.assertRaisesRegex(AttributeError, r'cannot rebind constant'):
K.PI = 9
with self.assertRaisesRegex(AttributeError, r'cannot delete constant'):
del K.PI
with self.assertRaisesRegex(AttributeError, r'cannot rebind constant'):
K('PI', 3)
self.assertTrue(K.PI in K)
self.assertTrue(K.TAU in K)
def test_duplicates(self):
class CardNumber(NamedConstant):
ACE = 11
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 6
SEVEN = 7
EIGHT = 8
NINE = 9
TEN = 10
JACK = 10
QUEEN = 10
KING = 10
self.assertFalse(CardNumber.TEN is CardNumber.JACK)
self.assertEqual(CardNumber.TEN, CardNumber.JACK)
self.assertEqual(CardNumber.TEN, 10)
def test_extend_constants(self):
class CardSuit(NamedConstant):
HEARTS = 1
SPADES = 2
DIAMONTS = 3
CLUBS = 4
self.assertEqual(CardSuit.HEARTS, 1)
stars = CardSuit('STARS', 5)
self.assertIs(stars, CardSuit.STARS)
self.assertEqual(CardSuit.STARS, 5)
self.assertTrue(CardSuit.STARS in CardSuit)
def test_constant_with_docstring(self):
class Stuff(NamedConstant):
Artifact = constant(7, "lucky number!")
Bowling = 11
HillWomp = constant(29, 'blah blah')
self.assertEqual(Stuff.Artifact, 7)
self.assertEqual(Stuff.Artifact.__doc__, 'lucky number!')
self.assertEqual(Stuff.Bowling, 11)
self.assertEqual(Stuff.Bowling.__doc__, None)
self.assertEqual(Stuff.HillWomp, 29)
self.assertEqual(Stuff.HillWomp.__doc__, 'blah blah')
def test_deep_copy(self):
import copy
class APITypes(aenum.Constant):
STRING = "string"
INT = "int"
APITypes('string')
d = {"first": APITypes.STRING}
copy.deepcopy(d)
self.assertTrue(d['first'] is APITypes.STRING)
def test_subclass_w_same_value(self):
class Foo(aenum.Constant):
BLA = 'bla1'
ABA = 'aba1'
class Bar(aenum.Constant):
BLA = Foo.BLA
ABA = 'aba2'
self.assertEqual(Foo.BLA, Bar.BLA)
self.assertFalse(Foo.BLA is Bar.BLA)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
CONVERT_TEST_SIGABRT = 4 # and this one
CONVERT_TEST_SIGIOT = 4
CONVERT_TEST_EIO = 7
CONVERT_TEST_EBUS = 7 # and this one
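# Comment-only sketch of what the tests below exercise: IntEnum._convert_ gathers
# the module-level CONVERT_TEST_* constants above into a new IntEnum, and when
# several names share a value the first name in lexicographic order is kept as
# the canonical one for reverse lookup.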
class TestIntEnumConvert(TestCase):
def test_convert_value_lookup_priority(self):
test_type = IntEnum._convert_(
'UnittestConvert',
'__main__',
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
self.assertEqual(test_type(4).name, 'CONVERT_TEST_SIGABRT')
self.assertEqual(test_type(7).name, 'CONVERT_TEST_EBUS')
self.assertEqual(
list(test_type),
[
test_type.CONVERT_TEST_SIGABRT,
test_type.CONVERT_TEST_NAME_A,
test_type.CONVERT_TEST_EBUS,
],
)
def test_convert_(self):
test_type = IntEnum._convert_(
'UnittestConvert',
'__main__',
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
class TestStarImport(TestCase):
def test_all_exports_names(self):
scope = {}
exec('from aenum import *', scope, scope)
self.assertIn('Enum', scope)
class TestStackoverflowAnswers(TestCase):
def test_self_referential_directions(self):
# https://stackoverflow.com/a/64000706/208880
class Directions(Enum):
_order_ = 'NORTH WEST SOUTH EAST'
#
NORTH = 1, 0
WEST = 0, 1
SOUTH = -1, 0
EAST = 0, -1
#
def __init__(self, x, y):
self.x = x
self.y = y
if len(self.__class__):
# make links
all = list(self.__class__)
left, right = all[0], all[-1]
self.left = left
self.right = right
left.right = self
right.left = self
#
D = Directions
self.assertEqual(D.NORTH.value, (1, 0))
self.assertTrue(D.NORTH.left is D.WEST)
self.assertTrue(D.SOUTH.right is D.WEST)
def test_self_referential_rock_paper_scissors(self):
# https://stackoverflow.com/a/57085357/208880
class RPS(Enum):
_order_ = 'Rock, Paper, Scissors'
#
Rock = "rock"
Paper = "paper"
Scissors = "scissors"
#
def __init__(self, value):
if len(self.__class__):
# make links
all = list(self.__class__)
first, previous = all[0], all[-1]
first.beats = self
self.beats = previous
#
self.assertTrue(RPS.Rock.beats is RPS.Scissors)
self.assertTrue(RPS.Scissors.beats is RPS.Paper)
self.assertTrue(RPS.Paper.beats is RPS.Rock)
def test_arduino_headers(self):
# https://stackoverflow.com/q/65048495/208880
class CHeader(Enum):
def __init_subclass__(cls, **kwds):
# write Enums to C header file
cls_name = cls.__name__
header_path = getattr(cls, '_%s__header' % cls_name)
with open(header_path, 'w') as fh:
fh.write('initial header stuff here\n')
for enum in cls:
fh.write('#define %s %r\n' % (enum.name, enum.value))
class Arduino(CHeader):
_order_ = 'ONE TWO'
__header = os.path.join(tempdir, 'arduino.h')
ONE = 1
TWO = 2
with open(os.path.join(tempdir, 'arduino.h')) as fh:
data = fh.read()
self.assertEqual(textwrap.dedent("""\
initial header stuff here
#define ONE 1
#define TWO 2
"""),
data,
)
def bezier_box_clear_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_clear_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_clear_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_clear_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_reset_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_reset_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_reset_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_reset_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_confirm_sharpness(self):
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status = cont.bezier_box_confirm_sharpness()
if hov_status:
break
return hov_status
def bezier_box_confirm_rotation(self):
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status = cont.bezier_box_confirm_rotation()
if hov_status:
break
return hov_status
def bezier_box_select_points(self, status):
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status = cont.bezier_box_select_points(status)
break
return hov_status
#
def set_scale(self, scale):
super().set_scale(scale)
if self.scroll_bar:
self.scroll_bar.set_scale(scale)
for cont in self.containers:
cont.set_scale(scale)
return
def set_header_bev(self, size, res):
if self.header:
self.header.set_bev(size, res)
return
def set_header_color(self, color=None, color_hover=None, color_click=None, color_font=None):
if self.header:
self.header.set_color(color=color, color_hover=color_hover,
color_click=color_click, color_font=color_font)
return
def set_header_font_size(self, size):
if self.header:
self.header.set_font_size(size)
return
def set_separation(self, sep):
self.container_separation = sep
return
def set_collapsed(self, status):
self.collapse = status
return
def set_button_bool(self, status, custom_id_filter=None):
for cont in self.containers:
cont.set_button_bool(status, custom_id_filter)
return
def set_color(self, color=None, color_outline=None, color_font=None):
if color_font:
self.color_font = color_font
super().set_color(color=color, color_outline=color_outline)
return
def set_style_color(self, color_box=None, color_row=None, color_item=None, color_hover=None, color_click=None):
if color_box != None:
self.color_box = color_box
if color_row != None:
self.color_row = color_row
if color_item != None:
self.color_item = color_item
if color_hover != None:
self.color_hover = color_hover
if color_click != None:
self.color_click = color_click
return
def set_header_icon_data(self, image=None, width=None, height=None, text_side=None):
if self.header != None:
self.header.set_icon_data(
image=image, width=width, height=height, text_side=text_side)
return
#
def __str__(self):
return 'CUI Box Container'
class CUIRowContainer(CUIContainer):
def __init__(self):
super().__init__()
self.items = []
self.color_font = (0.0, 0.0, 1.0, 1.0)
self.color_item = (0.0, 0.0, 0.3, 0.7)
self.color_hover = (0.0, 0.0, 0.5, 0.7)
self.color_click = (0.0, 0.0, 0.6, 1.0)
self.type = 'ROW'
self.items_separations = 4
self.draw_backdrop = False
return
#
def create_shape_data(self):
total_height = 0
total_height += self.vertical_margin
x_pos = 0
x_pos += self.horizontal_margin
highest = 0
# calc width of buttons
# if the even division exceeds an item's max width, give that item its max width and reconfigure the remaining non-maxed items
avail_width = self.width - self.horizontal_margin * \
2 - self.items_separations*(len(self.items)-1)
widths = []
max_widths = 0
rem_items = 0
for i, item in enumerate(self.items):
if item.item_type == 'BOOLEAN' and item.use_button == False:
if avail_width/len(self.items) > item.parts[0].bool_box_size:
widths.append(item.parts[0].bool_box_size)
max_widths += item.parts[0].bool_box_size
else:
widths.append(None)
rem_items += 1
elif item.max_width != None:
if avail_width/len(self.items) > item.max_width:
widths.append(item.max_width)
max_widths += item.max_width
else:
widths.append(None)
rem_items += 1
else:
widths.append(None)
rem_items += 1
new_avail = avail_width - max_widths
# place items in row
for i, item in enumerate(self.items):
if widths[i] != None:
item.width = widths[i]
# Not sure what this was for, but it caused issues with the max-width button
# x_pos += new_avail/2
else:
item.width = new_avail/rem_items
item.pos_offset[0] = x_pos
item.pos_offset[1] = -total_height
item.create_shape_data()
x_pos += item.width
if i < len(self.items)-1:
x_pos += self.items_separations
if item.height > highest:
highest = item.height
if self.width-self.horizontal_margin > x_pos:
for i, item in enumerate(self.items):
item.pos_offset[0] += (self.width -
self.horizontal_margin-x_pos)/2
# center items that are shorter than the tallest item vertically within the row
for i, item in enumerate(self.items):
if item.height < highest:
offset = int((highest-item.height)/2)
item.pos_offset[1] -= offset
total_height += highest
total_height += self.vertical_margin
self.height = total_height
super().create_shape_data()
return
def update_batches(self, position=[0, 0]):
super().update_batches(position)
for item in self.items:
item.update_batches(
[position[0]+self.scale_pos_offset[0], position[1]+self.scale_pos_offset[1]])
return
def draw(self):
super().draw()
if self.visible:
for item in self.items:
item.draw()
return
#
def add_button(self, height, text):
but = CUIButton(height, text)
but.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
self.items.append(but)
return but
def add_bool(self, height, text, default=False):
boolean = CUIBoolProp(height, text, default_val=default)
boolean.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
self.items.append(boolean)
return boolean
def add_label(self, height, text):
label = CUILabel(height, text)
label.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
self.items.append(label)
return label
def add_number(self, height, text, default, decimals, step, min, max):
num = CUINumProp(height, text, default, decimals, step, min, max)
num.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
num.set_value(default)
self.items.append(num)
return num
def add_bezier_box(self, height, type, points=None):
use_default = False
if points == None:
use_default = True
if points != None:
if len(points) < 2:
use_default = True
if use_default:
bez = CUIBezierBox(height, type, [(0, 1), (1, 0)])
else:
bez = CUIBezierBox(height, type, points)
self.items.append(bez)
return bez
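# Illustrative sketch (not part of the original file): building a row with the
# helper methods above; heights and values are arbitrary example numbers.
#   row = CUIRowContainer()
#   row.add_label(20, "Radius")
#   row.add_number(20, "Radius", 1.0, 2, 0.1, 0.0, 10.0)
#   row.add_bool(20, "Mirror", default=True)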
#
def test_click_down(self, mouse_co, shift, pos, arguments=None):
status = None
if self.hover:
for item in self.items:
if item.hover:
item.click_down = True
status = item.click_down_func(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
return status
def test_click_up(self, mouse_co, shift, pos, arguments=None):
status = None
if self.hover:
for item in self.items:
if item.hover:
item.click_down = False
status = item.click_up_func(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
return status
def click_down_move(self, mouse_co, shift, pos, arguments=None):
if self.hover:
for item in self.items:
item.click_down_move(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
return
#
def resize_width(self, width, move_fac):
self.width = width
return
def test_hover(self, mouse_co, pos):
self.clear_hover()
status = None
super().test_hover(mouse_co, [pos[0], pos[1]])
if self.hover:
for item in self.items:
i_status = item.test_hover(
mouse_co, [pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]])
if i_status:
status = i_status
return status
def filter_change_custom_id(self, tar_id, new_id):
for item in self.items:
if item.custom_id == tar_id:
item.custom_id = new_id
return
#
def clear_hover(self):
self.hover = False
for item in self.items:
item.clear_hover()
return
def reset_item_states(self, clear_hover):
for item in self.items:
item.reset_item_states(clear_hover)
return
def remove_item(self, index):
if index < len(self.items):
self.items.pop(index)
return
#
def type_add_key(self, key):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_add_key(key)
return
def type_delete_key(self):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_delete_key()
return
def type_move_pos(self, value):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_move_pos(value)
return
def type_confirm(self, arguments=None):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_confirm(arguments)
return
def type_cancel(self):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_cancel()
return
def type_backspace_key(self):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_backspace_key()
return
#
def bezier_box_delete_points(self, pos, arguments=None):
status = None
bez_id = None
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
item.bezier_box_delete_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
status = True
bez_id = item.custom_id
break
return status, bez_id
def bezier_box_sharpen_points(self, pos, offset, arguments=None):
status = False
hov_status = False
bez_id = None
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_sharpen_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], offset, arguments)
hov_status = True
bez_id = item.custom_id
break
return hov_status, bez_id, status
def bezier_box_rotate_points(self, pos, angle, arguments=None):
status = False
hov_status = False
mid_co = None
bez_id = None
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status, mid_co = item.bezier_box_rotate_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], angle, arguments)
hov_status = True
bez_id = item.custom_id
break
return hov_status, bez_id, status, mid_co
def bezier_box_clear_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_clear_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_clear_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_clear_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_reset_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_reset_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_reset_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_reset_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_confirm_sharpness(self):
hov_status = False
if self.hover:
for item in self.items:
search, such as a specified header or a query string.
"""
return pulumi.get(self, "field_to_match")
@field_to_match.setter
def field_to_match(self, value: pulumi.Input['RegexMatchSetRegexMatchTupleFieldToMatchArgs']):
pulumi.set(self, "field_to_match", value)
@property
@pulumi.getter(name="regexPatternSetId")
def regex_pattern_set_id(self) -> pulumi.Input[str]:
"""
The ID of a `WAF Regex Pattern Set`.
"""
return pulumi.get(self, "regex_pattern_set_id")
@regex_pattern_set_id.setter
def regex_pattern_set_id(self, value: pulumi.Input[str]):
pulumi.set(self, "regex_pattern_set_id", value)
@property
@pulumi.getter(name="textTransformation")
def text_transformation(self) -> pulumi.Input[str]:
"""
Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF.
e.g. `CMD_LINE`, `HTML_ENTITY_DECODE` or `NONE`.
See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_ByteMatchTuple.html#WAF-Type-ByteMatchTuple-TextTransformation)
for all supported values.
"""
return pulumi.get(self, "text_transformation")
@text_transformation.setter
def text_transformation(self, value: pulumi.Input[str]):
pulumi.set(self, "text_transformation", value)
@pulumi.input_type
class RegexMatchSetRegexMatchTupleFieldToMatchArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
data: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] type: The part of the web request that you want AWS WAF to search for a specified string.
e.g. `HEADER`, `METHOD` or `BODY`.
See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_FieldToMatch.html)
for all supported values.
:param pulumi.Input[str] data: When `type` is `HEADER`, enter the name of the header that you want to search, e.g. `User-Agent` or `Referer`.
If `type` is any other value, omit this field.
"""
pulumi.set(__self__, "type", type)
if data is not None:
pulumi.set(__self__, "data", data)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The part of the web request that you want AWS WAF to search for a specified string.
e.g. `HEADER`, `METHOD` or `BODY`.
See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_FieldToMatch.html)
for all supported values.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def data(self) -> Optional[pulumi.Input[str]]:
"""
When `type` is `HEADER`, enter the name of the header that you want to search, e.g. `User-Agent` or `Referer`.
If `type` is any other value, omit this field.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data", value)
@pulumi.input_type
class RuleGroupActivatedRuleArgs:
def __init__(__self__, *,
action: pulumi.Input['RuleGroupActivatedRuleActionArgs'],
priority: pulumi.Input[float],
rule_id: pulumi.Input[str],
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['RuleGroupActivatedRuleActionArgs'] action: Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule.
:param pulumi.Input[float] priority: Specifies the order in which the rules are evaluated. Rules with a lower value are evaluated before rules with a higher value.
:param pulumi.Input[str] rule_id: The ID of a `waf_rule`
:param pulumi.Input[str] type: The rule type, either `REGULAR`, `RATE_BASED`, or `GROUP`. Defaults to `REGULAR`.
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "priority", priority)
pulumi.set(__self__, "rule_id", rule_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def action(self) -> pulumi.Input['RuleGroupActivatedRuleActionArgs']:
"""
Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input['RuleGroupActivatedRuleActionArgs']):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def priority(self) -> pulumi.Input[float]:
"""
Specifies the order in which the rules are evaluated. Rules with a lower value are evaluated before rules with a higher value.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: pulumi.Input[float]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="ruleId")
def rule_id(self) -> pulumi.Input[str]:
"""
The ID of a `waf_rule`
"""
return pulumi.get(self, "rule_id")
@rule_id.setter
def rule_id(self, value: pulumi.Input[str]):
pulumi.set(self, "rule_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The rule type, either `REGULAR`, `RATE_BASED`, or `GROUP`. Defaults to `REGULAR`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class RuleGroupActivatedRuleActionArgs:
def __init__(__self__, *,
type: pulumi.Input[str]):
"""
:param pulumi.Input[str] type: The rule type, either `REGULAR`, `RATE_BASED`, or `GROUP`. Defaults to `REGULAR`.
"""
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The rule type, either `REGULAR`, `RATE_BASED`, or `GROUP`. Defaults to `REGULAR`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class RulePredicateArgs:
def __init__(__self__, *,
data_id: pulumi.Input[str],
negated: pulumi.Input[bool],
type: pulumi.Input[str]):
"""
:param pulumi.Input[str] data_id: A unique identifier for a predicate in the rule, such as Byte Match Set ID or IPSet ID.
:param pulumi.Input[bool] negated: Set this to `false` if you want to allow, block, or count requests
based on the settings in the specified `waf_byte_match_set`, `waf_ipset`, `waf.SizeConstraintSet`, `waf.SqlInjectionMatchSet` or `waf.XssMatchSet`.
For example, if an IPSet includes the IP address `192.0.2.44`, AWS WAF will allow or block requests based on that IP address.
If set to `true`, AWS WAF will allow, block, or count requests based on all IP addresses except `192.0.2.44`.
:param pulumi.Input[str] type: The type of predicate in a rule. Valid values: `ByteMatch`, `GeoMatch`, `IPMatch`, `RegexMatch`, `SizeConstraint`, `SqlInjectionMatch`, or `XssMatch`.
"""
pulumi.set(__self__, "data_id", data_id)
pulumi.set(__self__, "negated", negated)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataId")
def data_id(self) -> pulumi.Input[str]:
"""
A unique identifier for a predicate in the rule, such as Byte Match Set ID or IPSet ID.
"""
return pulumi.get(self, "data_id")
@data_id.setter
def data_id(self, value: pulumi.Input[str]):
pulumi.set(self, "data_id", value)
@property
@pulumi.getter
def negated(self) -> pulumi.Input[bool]:
"""
Set this to `false` if you want to allow, block, or count requests
based on the settings in the specified `waf_byte_match_set`, `waf_ipset`, `waf.SizeConstraintSet`, `waf.SqlInjectionMatchSet` or `waf.XssMatchSet`.
For example, if an IPSet includes the IP address `192.0.2.44`, AWS WAF will allow or block requests based on that IP address.
If set to `true`, AWS WAF will allow, block, or count requests based on all IP addresses except `192.0.2.44`.
"""
return pulumi.get(self, "negated")
@negated.setter
def negated(self, value: pulumi.Input[bool]):
pulumi.set(self, "negated", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of predicate in a rule. Valid values: `ByteMatch`, `GeoMatch`, `IPMatch`, `RegexMatch`, `SizeConstraint`, `SqlInjectionMatch`, or `XssMatch`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SizeConstraintSetSizeConstraintArgs:
def __init__(__self__, *,
comparison_operator: pulumi.Input[str],
field_to_match: pulumi.Input['SizeConstraintSetSizeConstraintFieldToMatchArgs'],
size: pulumi.Input[float],
text_transformation: pulumi.Input[str]):
"""
:param pulumi.Input[str] comparison_operator: The type of comparison you want to perform.
e.g. `EQ`, `NE`, `LT`, `GT`.
See [docs](https://docs.aws.amazon.com/waf/latest/APIReference/API_wafRegional_SizeConstraint.html) for all supported values.
:param pulumi.Input['SizeConstraintSetSizeConstraintFieldToMatchArgs'] field_to_match: Specifies where in a web request to look for the size constraint.
:param pulumi.Input[float] size: The size in bytes that you want to compare against the size of the specified `field_to_match`.
Valid values are between 0 - 21474836480 bytes (0 - 20 GB).
:param pulumi.Input[str] text_transformation: Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF.
If you specify a transformation, AWS WAF performs the transformation on `field_to_match` before inspecting a request for a match.
e.g. `CMD_LINE`, `HTML_ENTITY_DECODE` or `NONE`.
See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_SizeConstraint.html#WAF-Type-SizeConstraint-TextTransformation)
for all supported values.
**Note:** if you choose `BODY` as `type`, you must choose `NONE` because CloudFront forwards only the first 8192 bytes for inspection.
"""
pulumi.set(__self__, "comparison_operator", comparison_operator)
pulumi.set(__self__, "field_to_match", field_to_match)
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "text_transformation", text_transformation)
@property
@pulumi.getter(name="comparisonOperator")
def comparison_operator(self) -> pulumi.Input[str]:
"""
The type of comparison you want to perform.
e.g. `EQ`, `NE`, `LT`, `GT`.
See [docs](https://docs.aws.amazon.com/waf/latest/APIReference/API_wafRegional_SizeConstraint.html) for all supported values.
"""
return pulumi.get(self, "comparison_operator")
@comparison_operator.setter
def comparison_operator(self, value: pulumi.Input[str]):
pulumi.set(self, "comparison_operator", value)
@property
@pulumi.getter(name="fieldToMatch")
def field_to_match(self) -> pulumi.Input['SizeConstraintSetSizeConstraintFieldToMatchArgs']:
"""
Specifies where in a web request to look for the size constraint.
"""
return pulumi.get(self, "field_to_match")
@field_to_match.setter
def field_to_match(self, value: pulumi.Input['SizeConstraintSetSizeConstraintFieldToMatchArgs']):
pulumi.set(self, "field_to_match", value)
@property
@pulumi.getter
def size(self) -> pulumi.Input[float]:
"""
The size in bytes that you want to compare against the size of the specified `field_to_match`.
Valid values are between 0 - 21474836480 bytes (0 - 20 GB).
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: pulumi.Input[float]):
pulumi.set(self, "size", value)
@property
@pulumi.getter(name="textTransformation")
def text_transformation(self) -> pulumi.Input[str]:
"""
Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF.
If you specify a transformation, AWS WAF performs the transformation on `field_to_match` before inspecting a request for a match.
e.g. `CMD_LINE`, `HTML_ENTITY_DECODE` or `NONE`.
See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_SizeConstraint.html#WAF-Type-SizeConstraint-TextTransformation)
for all supported values.
**Note:** if you choose `BODY` as `type`, you must choose `NONE` because CloudFront forwards only the first 8192 bytes for inspection.
"""
return pulumi.get(self, "text_transformation")
@text_transformation.setter
def text_transformation(self, value: pulumi.Input[str]):
pulumi.set(self, "text_transformation", value)
@pulumi.input_type
class SizeConstraintSetSizeConstraintFieldToMatchArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
data: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] type: The part of the web request that you want AWS WAF to search for a specified string.
e.g. `HEADER`, `METHOD` or `BODY`.
See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_FieldToMatch.html)
for all supported values.
:param pulumi.Input[str] data: When `type` is `HEADER`, enter the name of the header that you want to search, e.g. `User-Agent` or `Referer`.
import math
from collections import Counter
import pytest
import torch
from neuralpp.inference.graphical_model.representation.factor.pytorch_table_factor import PyTorchTableFactor
from neuralpp.inference.graphical_model.representation.table.pytorch_log_table import (
PyTorchLogTable,
)
from neuralpp.inference.graphical_model.representation.table.pytorch_table import (
BatchCoordinatesDoNotAgreeException,
PyTorchTable,
)
from neuralpp.inference.graphical_model.variable.discrete_variable import DiscreteVariable
from neuralpp.inference.graphical_model.variable.integer_variable import IntegerVariable
from neuralpp.util.discrete_sampling import discrete_sample
@pytest.fixture
def x():
return IntegerVariable("x", 3)
@pytest.fixture
def y():
return IntegerVariable("y", 2)
@pytest.fixture
def z():
return IntegerVariable("z", 2)
@pytest.fixture(params=["Non-log space", "Log space"])
def log_space(request):
return request.param == "Log space"
@pytest.fixture(params=["Non-log space", "Log space"])
def log_space1(request):
return request.param == "Log space"
@pytest.fixture(params=["Non-log space", "Log space"])
def log_space2(request):
return request.param == "Log space"
@pytest.fixture(params=["Non-log space", "Log space"])
def log_space_expected(request):
return request.param == "Log space"
@pytest.fixture(params=[None, 0, 4])
def batch_size(request):
return request.param
@pytest.fixture(params=[None, 0, 4])
def batch_size1(request):
return request.param
@pytest.fixture(params=[None, 0, 4])
def batch_size2(request):
return request.param
def test_assignments(x, y, log_space):
factor = PyTorchTableFactor.from_function(
(x, y), lambda x, y: float(x == y), log_space=log_space
)
print(list(factor.assignments()))
assert list(factor.assignments()) == [
(0, 0),
(0, 1),
(1, 0),
(1, 1),
(2, 0),
(2, 1),
]
def test_new_instance(x, y, log_space):
factor1 = PyTorchTableFactor.from_function(
(x, y), lambda x, y: float(x == y), log_space=log_space
)
factor2 = factor1.new_instance(factor1.variables, factor1.table)
assert factor2 is not factor1
assert factor2 == factor2
def batch_function_adapter_from_function_without_batch_row_index(
function_without_batch_row_index, batch
):
def result(*args):
args_without_batch_row_index = args[1:] if batch else args
potential_without_batch_row_index = function_without_batch_row_index(
*args_without_batch_row_index
)
if batch:
batch_row_index = args[0]
potential = (batch_row_index + 1) * potential_without_batch_row_index
else:
potential = potential_without_batch_row_index
return potential
return result
def batch_function_adapter_from_function_with_batch_row_index(
function_with_batch_row_index, batch
):
if batch:
return function_with_batch_row_index
else:
return lambda *rest: function_with_batch_row_index(0, *rest)
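# Illustrative behaviour of the two adapters above (comment-only sketch):
#   f = lambda x, y: float(x == y)
#   g = batch_function_adapter_from_function_without_batch_row_index(f, batch=True)
#   g(0, 1, 1)  # -> 1.0, batch row 0: (0 + 1) * f(1, 1)
#   g(2, 1, 1)  # -> 3.0, batch row 2: (2 + 1) * f(1, 1)
# With batch=False the wrapper passes the arguments straight through, so the
# potential is just f(*args).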
def table_factor_from_function_without_batch_row_index(
variables, function_without_batch_row_index, log_space, batch_size
):
batch = batch_size is not None
function = batch_function_adapter_from_function_without_batch_row_index(
function_without_batch_row_index, batch
)
factor = PyTorchTableFactor.from_function(
variables, function, log_space=log_space, batch_size=batch_size
)
return factor
def table_factor_from_function_with_batch_row_index(
variables, function_with_batch_row_index, log_space, batch_size
):
batch = batch_size is not None
function = batch_function_adapter_from_function_with_batch_row_index(
function_with_batch_row_index, batch
)
factor = PyTorchTableFactor.from_function(
variables, function, log_space=log_space, batch_size=batch_size
)
return factor
def test_condition(x, y, log_space, log_space_expected, batch_size):
def expected_table_factor(variables, function):
return table_factor_from_function_with_batch_row_index(
variables, function, log_space_expected, batch_size
)
fixy = lambda i, x, y: float((10 ** i) * (x * 3 + y))
factor = table_factor_from_function_with_batch_row_index(
(x, y), fixy, log_space, batch_size
)
table_class_to_use = PyTorchLogTable if log_space_expected else PyTorchTable
tests = [
({}, factor),
({x: 0}, expected_table_factor((y,), lambda i, y: fixy(i, 0, y))),
({x: 1}, expected_table_factor((y,), lambda i, y: fixy(i, 1, y))),
({y: 0}, expected_table_factor((x,), lambda i, x: fixy(i, x, 0))),
({y: 1}, expected_table_factor((x,), lambda i, x: fixy(i, x, 1))),
({x: 0, y: 0}, expected_table_factor(tuple(), lambda i: fixy(i, 0, 0))),
({x: 1, y: 0}, expected_table_factor(tuple(), lambda i: fixy(i, 1, 0))),
(
{x: slice(None), y: 0},
expected_table_factor((x,), lambda i, x: fixy(i, x, 0)),
),
(
{x: slice(None), y: 1},
expected_table_factor((x,), lambda i, x: fixy(i, x, 1)),
),
(
{x: 0, y: slice(None)},
expected_table_factor((y,), lambda i, y: fixy(i, 0, y)),
),
(
{x: 1, y: slice(None)},
expected_table_factor((y,), lambda i, y: fixy(i, 1, y)),
),
({x: slice(None), y: slice(None)}, expected_table_factor((x, y), fixy)),
]
run_condition_tests(factor, tests)
tests_for_batch_size_different_from_zero = [
(
{x: [0, 2, 2, 2], y: 0},
PyTorchTableFactor(
tuple(),
[fixy(0, 0, 0), fixy(0, 2, 0), fixy(0, 2, 0), fixy(0, 2, 0)],
log_space=log_space_expected,
batch=True,
)
if batch_size is None
else PyTorchTableFactor(
tuple(),
[fixy(0, 0, 0), fixy(1, 2, 0), fixy(2, 2, 0), fixy(3, 2, 0)],
log_space=log_space_expected,
batch=True,
)
if batch_size == 4
else None,
),
(
{x: [0, 2, 2, 2], y: [0, 1, 0, 1]},
PyTorchTableFactor(
tuple(),
[fixy(0, 0, 0), fixy(0, 2, 1), fixy(0, 2, 0), fixy(0, 2, 1)],
log_space=log_space_expected,
batch=True,
)
if batch_size is None
else PyTorchTableFactor(
tuple(),
[fixy(0, 0, 0), fixy(1, 2, 1), fixy(2, 2, 0), fixy(3, 2, 1)],
log_space=log_space_expected,
batch=True,
)
if batch_size == 4
else None,
),
(
{x: 2, y: [0, 1, 0, 1]},
PyTorchTableFactor(
tuple(),
[fixy(0, 2, 0), fixy(0, 2, 1), fixy(0, 2, 0), fixy(0, 2, 1)],
log_space=log_space_expected,
batch=True,
)
if batch_size is None
else PyTorchTableFactor(
tuple(),
[fixy(0, 2, 0), fixy(1, 2, 1), fixy(2, 2, 0), fixy(3, 2, 1)],
log_space=log_space_expected,
batch=True,
)
if batch_size == 4
else None,
),
]
if batch_size is None or batch_size != 0:
run_condition_tests(factor, tests_for_batch_size_different_from_zero)
if batch_size is not None:
illegal_conditionings_for_batches = [
{
x: [1] * (batch_size + 2),
y: [1] * (batch_size + 2),
}, # does not coincide with batch size
# note that using batch_size + 1 would result in [1] coordinates for batch_size == 0,
# which have length 1 and are therefore *not* considered batch_coordinates, so that would not be illegal!
# We use batch_size + 2 to get an illegal case for both batch_size == 0 and batch_size == 4.
{x: [0, 1], y: [0, 1, 0]}, # batch coordinates do not coincide
]
for illegal_conditioning_for_batch in illegal_conditionings_for_batches:
try:
factor[illegal_conditioning_for_batch]
raise AssertionError(
f"Should have thrown a {BatchCoordinatesDoNotAgreeException.__name__}"
)
except BatchCoordinatesDoNotAgreeException:
pass
if batch_size is None:
illegal_conditionings_for_non_batches = [
{x: [0, 1], y: [0, 1, 0]}, # batch coordinates do not coincide
]
for illegal_conditioning_for_non_batch in illegal_conditionings_for_non_batches:
try:
factor[illegal_conditioning_for_non_batch]
raise AssertionError(
f"Should have thrown a {BatchCoordinatesDoNotAgreeException.__name__}"
)
except BatchCoordinatesDoNotAgreeException:
pass
def run_condition_tests(factor, tests):
for assignment_dict, expected_factor in tests:
actual_factor = factor.condition(assignment_dict)
print(f"factor.condition({assignment_dict}) = {actual_factor}")
assert actual_factor == expected_factor
def test_get_item(x, y, log_space, log_space_expected, batch_size):
factor = table_factor_from_function_without_batch_row_index(
(x, y), lambda x, y: float(x == y), log_space, batch_size
)
actual = factor[{x: 0, y: 0}]
print(f"actual: {actual}")
expected_value_0_0 = 1.0
batch = batch_size is not None
if batch:
# (batch row index + 1) * expected_value_0_0
expected = torch.tensor(
[
expected_value_0_0 * (batch_row_index + 1)
for batch_row_index in range(batch_size)
]
)
else:
expected = torch.tensor(expected_value_0_0)
print(f"expected: {expected}")
assert torch.allclose(actual, expected)
def test_mul(x, y, z, log_space1, log_space2, batch_size):
f_x_y = lambda x, y: float((x + 1) * (y + 1))
f_y_z = lambda y, z: float((y + 1) * (z + 1) * 10)
factor1 = table_factor_from_function_without_batch_row_index(
(x, y), f_x_y, log_space1, batch_size
)
factor2 = table_factor_from_function_without_batch_row_index(
(y, z), f_y_z, log_space2, batch_size
)
batch = batch_size is not None
def f_x_y_z(*args):
if batch:
batch_row_index, x, y, z = args
batch_aware_f_x_y = (batch_row_index + 1) * f_x_y(x, y)
batch_aware_f_y_z = (batch_row_index + 1) * f_y_z(y, z)
result = batch_aware_f_x_y * batch_aware_f_y_z
print()
print(f"batch_row_index: {batch_row_index}")
print(f"batch_row_index + 1: {(batch_row_index + 1)}")
print(f"x: {x}")
print(f"y: {y}")
print(f"z: {z}")
print(f"f(x,y): {f_x_y(x, y)}")
print(f"f(y,z): {f_y_z(y, z)}")
print(
f"(batch_row_index + 1) * f_x_y(x,y) * (batch_row_index + 1) * f_y_z(y,z): {result}"
)
else:
x, y, z = args
result = f_x_y(x, y) * f_y_z(y, z)
print()
print(f"x: {x}")
print(f"y: {y}")
print(f"z: {z}")
print(f"f(x,y): {f_x_y(x,y)}")
print(f"f(y,z): {f_y_z(y,z)}")
print(f"f_x_y(x,y) * f_y_z(y,z): {result}")
return result
expected_product = PyTorchTableFactor.from_function(
(x, y, z), f_x_y_z, log_space_expected, batch_size
)
product = factor1 * factor2
print(f"factor1: {factor1}")
print(f"factor2: {factor2}")
print(f"factor1*factor2 : {product}")
print(f"expected product: {expected_product}")
assert product == expected_product
def get_assignment_index(assignment, variables):
current_stride = 1
total = 0
for i, v in reversed(list(enumerate(variables))):
total += assignment[i] * current_stride
current_stride *= v.cardinality
return total
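# Worked example (comment-only sketch): indexing is row-major with the last
# variable varying fastest. For variables with cardinalities (3, 2, 2) and
# assignment (1, 0, 1):
#   1*1 (z stride) + 0*2 (y stride) + 1*4 (x stride) = 5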
def test_get_assignment_index(x, y, z):
variables = [x, y, z]
for i, v in enumerate(variables):
selected_variables = variables[:i]
assignment_index = 0
for assignment in DiscreteVariable.assignments_product(selected_variables):
assert assignment_index == get_assignment_index(
assignment, selected_variables
)
assignment_index += 1
def test_argmax(x, y, z, log_space, batch_size):
def fixyz(i, x, y, z):
return sum(v * 10 ** (j + 1) for j, v in enumerate([i, x, y, z]))
factor = table_factor_from_function_with_batch_row_index(
(x, y, z), fixyz, log_space, batch_size
)
if batch_size is None:
expected = {x: 2, y: 1, z: 1}
else:
expected = {
x: torch.tensor([2] * batch_size),
y: torch.tensor([1] * batch_size),
z: torch.tensor([1] * batch_size),
}
actual = factor.argmax()
for v in (x, y, z):
assert actual[v].eq(expected[v]).all()
def test_sample(x, y, z, log_space, batch_size):
for number_of_variables in range(1, 3):
variables = [x, y, z][0:number_of_variables]
cardinalities = [v.cardinality for v in variables]
number_of_assignments = math.prod(v.cardinality for v in variables)
potentials = list(range(number_of_assignments))
probabilities = torch.tensor(potentials, dtype=torch.float) / sum(potentials)
if batch_size is None:
def f(*assignment):
return get_assignment_index(assignment, variables)
else:
def f(*assignment):
batch_index, assignment = assignment[0], assignment[1:]
return get_assignment_index(assignment, variables)
factor = PyTorchTableFactor.from_function(
variables, f, log_space=log_space, batch_size=batch_size
).normalize()
# TODO: we should be able to sample from unnormalized factors
n = 10000
batch_samples = [factor.sample() for i in range(n)]
effective_batch_size = batch_size if batch_size is not None else 1
samples_per_factor_batch_row = [
[tuple(batch_samples[j][i].tolist()) for j in range(n)]
for i in range(effective_batch_size)
]
for batch_row in range(effective_batch_size):
samples_for_row = samples_per_factor_batch_row[batch_row]
count = Counter(samples_for_row)
counts_in_order_of_assignment_index = [
from __future__ import absolute_import, division, print_function
import itertools as it
import os
import gzip
import bz2
from functools import partial
from contextlib import contextmanager
from ..dispatch import dispatch
from cytoolz import partition_all, merge, keyfilter, pluck
from toolz import concat, get, pipe, identity, take
from toolz.curried import map, get
from dynd import nd
import pandas as pd
from datashape.discovery import discover, null, unpack
from datashape import (dshape, Record, Option, Fixed, Unit, Tuple, string,
DataShape)
import datashape as ds
from datashape.predicates import isdimension
import blaze as bz
from .pandas_dtype import dshape_to_pandas
from .core import DataDescriptor
from ..resource import resource
from ..utils import nth, nth_list, keywords
from .. import compatibility
from ..compatibility import SEEK_END, builtins, _strtypes, _inttypes
from ..compatibility import zip, PY2
from .utils import ordered_index, listpack, coerce
import csv
__all__ = ['CSV', 'drop']
numtypes = frozenset(ds.integral.types) | frozenset(ds.floating.types)
na_values = frozenset(pd.io.parsers._NA_VALUES)
read_csv_kwargs = set(keywords(pd.read_csv))
assert read_csv_kwargs
def clean_dialect(dialect):
""" Make a csv dialect apprpriate for pandas.read_csv """
dialect = keyfilter(read_csv_kwargs.__contains__,
dialect)
# handle windows
if dialect['lineterminator'] == '\r\n':
dialect['lineterminator'] = None
return dialect
to_csv_kwargs = set(keywords(pd.core.format.CSVFormatter.__init__))
assert to_csv_kwargs
DEFAULT_ENCODING = 'utf-8'
def has_header(sample, encoding=DEFAULT_ENCODING):
"""Check whether a piece of sample text from a file has a header
Parameters
----------
sample : str
Text to check for existence of a header
encoding : str
Encoding to use if ``isinstance(sample, bytes)``
Returns
-------
h : bool or NoneType
None if an error is thrown, otherwise ``True`` if a header exists and
``False`` otherwise.
"""
sniffer = csv.Sniffer().has_header
try:
return sniffer(sample)
except TypeError:
return sniffer(sample.decode(encoding))
except csv.Error:
return None
def get_dialect(sample, dialect=None, **kwargs):
try:
dialect = csv.get_dialect(dialect)
except csv.Error:
try:
dialect = csv.Sniffer().sniff(sample)
except csv.Error:
dialect = csv.excel
assert dialect is not None
# Convert dialect to dictionary
dialect = dict((key, getattr(dialect, key))
for key in dir(dialect) if not key.startswith('_'))
# Update dialect with any keyword arguments passed in
# E.g. allow user to override with delimiter=','
for k, v in kwargs.items():
if k in dialect:
dialect[k] = v
return dialect
def discover_dialect(sample, dialect=None, **kwargs):
"""Discover a CSV dialect from string sample and additional keyword
arguments
Parameters
----------
sample : str
dialect : str or csv.Dialect
Returns
-------
dialect : dict
"""
dialect = get_dialect(sample, dialect, **kwargs)
assert dialect
# Pandas uses sep instead of delimiter.
# Lets support that too
if 'sep' in kwargs:
dialect['delimiter'] = kwargs['sep']
else:
# but only on read_csv, to_csv doesn't accept delimiter so we need sep
# for sure
dialect['sep'] = dialect['delimiter']
# line_terminator is for to_csv
dialect['lineterminator'] = dialect['line_terminator'] = \
dialect.get('line_terminator', dialect.get('lineterminator', os.linesep))
return dialect
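# Illustrative result (comment-only sketch): sniffing a small comma-separated
# sample returns a plain dict carrying both csv- and pandas-style keys.
#   d = discover_dialect('a,b\n1,2\n')
#   d['delimiter']       # ','
#   d['sep']             # same as d['delimiter'], added for pandas.read_csv
#   d['lineterminator']  # also mirrored as d['line_terminator'] for to_csv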
@contextmanager
def csvopen(csv, **kwargs):
try:
f = csv.open(csv.path, encoding=csv.encoding, **kwargs)
except (TypeError, ValueError): # TypeError for py2 ValueError for py3
f = csv.open(csv.path, **kwargs)
yield f
try:
f.close()
except AttributeError:
pass
def get_sample(csv, size=16384):
path = csv.path
if os.path.exists(path) and csv.mode != 'w':
with csvopen(csv, mode='rt') as f:
return f.read(size)
return ''
def ext(path):
_, e = os.path.splitext(path)
return e.lstrip('.')
def safely_option(ds):
""" Wrap certain types in an option type
>>> safely_option('int32')
?int32
>>> safely_option('?int32')
?int32
>>> safely_option('float64')
ctype("float64")
"""
if isinstance(ds, _strtypes):
ds = dshape(ds)
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
if isinstance(ds, Unit) and 'int' in str(ds) or 'date' in str(ds):
return Option(ds)
return ds
def discover_csv(path, encoding=DEFAULT_ENCODING, nrows_discovery=50,
header=None, dialect=None, types=None, columns=None,
typehints=None):
""" Discover datashape of CSV file """
df = pd.read_csv(path,
dtype='O',
encoding=encoding,
chunksize=nrows_discovery,
compression={'gz': 'gzip',
'bz2': 'bz2'}.get(ext(path)),
header=0 if header else None,
**clean_dialect(dialect)).get_chunk()
if not types:
L = (df.fillna('')
.to_records(index=False)
.tolist())
rowtype = discover(L).subshape[0]
if isinstance(rowtype[0], Tuple):
types = rowtype[0].dshapes
types = [unpack(t) for t in types]
types = [string if t == null else t for t in types]
types = [safely_option(t) for t in types]
elif (isinstance(rowtype[0], Fixed) and
isinstance(rowtype[1], Unit)):
types = int(rowtype[0]) * [rowtype[1]]
else:
raise ValueError("Could not discover schema from data.\n"
"Please specify schema.")
if not columns:
if header:
columns = list(df.columns)
else:
columns = ['_%d' % i for i in range(len(types))]
if typehints:
types = [typehints.get(c, t) for c, t in zip(columns, types)]
return dshape(Record(list(zip(columns, types))))
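# Illustrative call (comment-only sketch; 'data.csv' is a hypothetical file):
#   ds = discover_csv('data.csv', header=True,
#                     dialect=discover_dialect('a,b\n1,2\n'),
#                     typehints={'b': 'string'})
# The result is a Record dshape mapping each column name to a type inferred from
# the first `nrows_discovery` rows, with column 'b' forced to string by the hint.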
class CSV(DataDescriptor):
"""
Blaze data descriptor to a CSV file.
This reads in a portion of the file to discover the CSV dialect
(i.e. delimiter, end-of-line character, ...), the column names (from the header),
and the types (by looking at the values in the first 50 lines). Often this
just works; however, for complex datasets you may have to supply more
metadata about your file.
For full automatic handling just specify the filename
>>> dd = CSV('myfile.csv') # doctest: +SKIP
Standard csv parsing terms like ``delimiter`` are available as keyword
arguments. See the standard ``csv`` library for more details on dialects.
>>> dd = CSV('myfile.csv', delimiter='\t') # doctest: +SKIP
If column names are not present in the header, specify them with the
columns keyword argument
>>> dd = CSV('myfile.csv',
... columns=['id', 'name', 'timestamp', 'value']) # doctest: +SKIP
If a few types are not correctly discovered from the data then add additional
type hints.
>>> dd = CSV('myfile.csv',
... columns=['id', 'name', 'timestamp', 'value'],
... typehints={'timestamp': 'datetime'}) # doctest: +SKIP
Alternatively specify all types manually
>>> dd = CSV('myfile.csv',
... columns=['id', 'name', 'timestamp', 'value'],
... types=['int', 'string', 'datetime', 'float64']) # doctest: +SKIP
Or specify a datashape explicitly
>>> schema = '{id: int, name: string, timestamp: datetime, value: float64}'
>>> dd = CSV('myfile.csv', schema=schema) # doctest: +SKIP
Parameters
----------
path : string
A path string for the CSV file.
schema : string or datashape
A datashape (or its string representation) of the schema
in the CSV file.
dialect : string or csv.Dialect instance
The dialect as understood by the `csv` module in Python standard
library. If not specified, a value is guessed.
header : boolean
Whether the CSV file has a header or not. If not specified a value
is guessed.
open : context manager
An alternative method to open the file.
For examples: gzip.open, codecs.open
nrows_discovery : int
Number of rows to read when determining datashape
"""
def __init__(self, path, mode='rt', schema=None, columns=None, types=None,
typehints=None, dialect=None, header=None, open=open,
nrows_discovery=50, chunksize=1024,
encoding=DEFAULT_ENCODING, **kwargs):
if 'r' in mode and not os.path.isfile(path):
raise ValueError('CSV file "%s" does not exist' % path)
if schema is None and 'w' in mode:
raise ValueError('Please specify schema for writable CSV file')
self.path = path
self.mode = mode
self.open = {'gz': gzip.open, 'bz2': bz2.BZ2File}.get(ext(path), open)
self._abspath = os.path.abspath(path)
self.chunksize = chunksize
self.encoding = encoding
sample = get_sample(self)
self.dialect = dialect = discover_dialect(sample, dialect, **kwargs)
if header is None:
header = has_header(sample, encoding=encoding)
elif isinstance(header, int):
header = True
self.header = header
if not schema and 'w' not in mode:
schema = discover_csv(path, encoding=encoding, dialect=dialect,
header=self.header, typehints=typehints,
types=types, columns=columns,
nrows_discovery=nrows_discovery)
self._schema = schema
self.header = header
if 'w' not in mode:
try:
nd.array(list(take(10, self._iter(chunksize=10))),
dtype=str(schema))
except (ValueError, TypeError) as e:
raise ValueError("Automatic datashape discovery failed\n"
"Discovered the following datashape: %s\n"
"But DyND generated the following error: %s\n"
"Consider providing type hints using "
"typehints={'column-name': 'type'}\n"
"like typehints={'start-time': 'string'}"
% (schema, e.args[0]))
def get_py(self, key):
return self._get_py(ordered_index(key, self.dshape))
def _get_py(self, key):
if isinstance(key, tuple):
assert len(key) == 2
rows, cols = key
usecols = cols
ds = self.dshape.subshape[rows, cols]
usecols = None if isinstance(usecols, slice) else listpack(usecols)
else:
rows = key
ds = self.dshape.subshape[rows]
usecols = None
if isinstance(ds, DataShape) and isdimension(ds[0]):
ds = ds.subshape[0]
seq = self._iter(usecols=usecols)
if isinstance(key, tuple) and isinstance(cols, _strtypes + _inttypes):
seq = pluck(0, seq)
seq = coerce(ds, seq)
if isinstance(rows, compatibility._inttypes):
line = nth(rows, seq)
try:
return next(line).item()
except TypeError:
try:
return line.item()
except AttributeError:
return line
elif isinstance(rows, list):
return nth_list(rows, seq)
elif isinstance(rows, slice):
return it.islice(seq, rows.start, rows.stop, rows.step)
else:
raise IndexError("key %r is not valid" % rows)
def pandas_read_csv(self, usecols=None, **kwargs):
""" Use pandas.read_csv with the right keyword arguments
In particular we know what dtypes should be, which columns are dates,
etc...
"""
dtypes, dates = dshape_to_pandas(self.schema)
if usecols:
if builtins.all(isinstance(c, int) for c in usecols):
usecols = get(usecols, self.columns)
dates = [name for name in dates if name in usecols]
header = kwargs.pop('header', self.header)
header = 0 if self.header else None
result = pd.read_csv(self.path,
names=kwargs.pop('names', self.columns),
usecols=usecols,
if ndir not in ncandidates:
ncandidates.add(ndir)
candidates.append(dir)
elif nhome and ndir.startswith(nhome) \
and ndir[len(nhome)+1:].count(os.path.sep) < 2:
if ndir not in ncandidates:
ncandidates.add(ndir)
candidates.append(dir)
#print candidates
print("""\
It appears that `go' is not set up properly in your environment. Typing
`go' must end up calling `go.bat' somewhere on your PATH and *not* `go.py'
directly. This is how `go' can change the directory in your current shell.
You'll need a file "go.bat" with the following contents in a directory on
your PATH:
%s""" % _indent(driver))
if candidates:
print("\nCandidate directories are:\n")
for i, dir in enumerate(candidates):
print(" [%s] %s" % (i+1, dir))
print()
answer = _query_custom_answers(
"If you would like this script to create `go.bat' for you in\n"
"one of these directories, enter the number of that\n"
"directory. Otherwise, enter 'no' to not create `go.bat'.",
[str(i+1) for i in range(len(candidates))] + ["&no"],
default="no",
)
if answer == "no":
pass
else:
dir = candidates[int(answer)-1]
path = join(dir, "go.bat")
print("\nCreating `%s'." % path)
print("You should now be able to run `go --help'.")
open(path, 'w').write(driver)
elif shell == "sh":
print("""\
It appears that `go' is not set up properly in your environment. Typing
`go' must end up calling the Bash function `go' and *not* `go.py'
directly. This is how `go' can change the directory in your current shell.
You'll need to have the following function in your shell startup script
(e.g. `.bashrc' or `.profile'):
%s
To just play around in your current shell, simply cut and paste this
function.""" % _indent(driver))
candidates = ["~/.bashrc", "~/.bash_profile", "~/.bash_login",
"~/.profile"]
candidates = [c for c in candidates if exists(expanduser(c))]
if candidates:
q = """\
Would you like this script to append `function go' to one of the following
Bash initialization scripts? If so, enter the number of the listed file.
Otherwise, enter `no'."""
for i, path in enumerate(candidates):
q += "\n (%d) %s" % (i+1, path)
answers = [str(i+1) for i in range(len(candidates))] + ["&no"]
print()
answer = _query_custom_answers(q, answers, default="no")
if answer == "no":
pass
else:
path = candidates[int(answer)-1]
xpath = expanduser(path)
f = codecs.open(xpath, 'a', 'utf-8')
try:
f.write('\n\n'+driver)
finally:
f.close()
print()
print("`function go' appended to `%s'." % path)
print("Run `source %s` to enable this for this shell." % path)
print("You should then be able to run `go --help'.")
else:
print("""\
It appears that `go' is not set up properly in your environment. Typing
`go' must end up calling the shell function `go' and *not* `go.py'
directly. This is how `go' can change the directory in your current shell.
The appropriate function for the *Bash* shell is this:
%s
If you know the appropriate translation for your shell (%s) I'd appreciate
your feedback on that so I can update this script. Please add an issue here:
http://code.google.com/p/go-tool/issues/list
Thanks!""" % (_indent(_gDriverFromShell["sh"]), shell))
print("* * *")
# Recipe: query_custom_answers (1.0)
def _query_custom_answers(question, answers, default=None):
"""Ask a question via raw_input() and return the chosen answer.
@param question {str} Printed on stdout before querying the user.
@param answers {list} A list of acceptable string answers. Particular
answers can include '&' before one of its letters to allow a
single letter to indicate that answer. E.g., ["&yes", "&no",
"&quit"]. All answer strings should be lowercase.
@param default {str, optional} A default answer. If no default is
given, then the user must provide an answer. With a default,
just hitting <Enter> is sufficient to choose.
"""
prompt_bits = []
answer_from_valid_choice = {
# <valid-choice>: <answer-without-&>
}
clean_answers = []
for answer in answers:
if '&' in answer and not answer.index('&') == len(answer)-1:
head, tail = answer.split('&', 1)
prompt_bits.append(head.lower()+tail.lower().capitalize())
clean_answer = head+tail
shortcut = tail[0].lower()
else:
prompt_bits.append(answer.lower())
clean_answer = answer
shortcut = None
if default is not None and clean_answer.lower() == default.lower():
prompt_bits[-1] += " (default)"
answer_from_valid_choice[clean_answer.lower()] = clean_answer
if shortcut:
answer_from_valid_choice[shortcut] = clean_answer
clean_answers.append(clean_answer.lower())
# This is what it will look like:
# Frob nots the zids? [Yes (default), No, quit] _
# Possible alternatives:
# Frob nots the zids -- Yes, No, quit? [y] _
# Frob nots the zids? [*Yes*, No, quit] _
# Frob nots the zids? [_Yes_, No, quit] _
# Frob nots the zids -- (y)es, (n)o, quit? [y] _
prompt = " [%s] " % ", ".join(prompt_bits)
leader = question + prompt
if len(leader) + max(len(c) for c in answer_from_valid_choice) > 78:
leader = question + '\n' + prompt.lstrip()
leader = leader.lstrip()
valid_choices = answer_from_valid_choice.keys()
admonishment = "*** Please respond with '%s' or '%s'. ***" \
% ("', '".join(clean_answers[:-1]), clean_answers[-1])
while 1:
sys.stdout.write(leader)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in answer_from_valid_choice:
return answer_from_valid_choice[choice]
else:
sys.stdout.write("\n"+admonishment+"\n\n\n")
# Recipe: indent (0.2.1)
def _indent(s, width=4, skip_first_line=False):
"""_indent(s, [width=4]) -> 's' indented by 'width' spaces
The optional "skip_first_line" argument is a boolean (default False)
indicating if the first line should NOT be indented.
"""
lines = s.splitlines(1)
indentstr = ' '*width
if skip_first_line:
return indentstr.join(lines)
else:
return indentstr + indentstr.join(lines)
def _normpath(path):
from os.path import normcase, normpath
n = normcase(normpath(path))
if n.endswith(os.path.sep):
n = n[:-1]
elif os.path.altsep and n.endswith(os.path.altsep):
n = n[:-1]
return n
#---- mainline
def main(argv):
# Must write out a no-op shell script before any error can happen
# otherwise the script from the previous run could result.
try:
shellScript = os.environ[_envvar]
except KeyError:
if _subsystem == "windows":
pass # Don't complain about missing console setup.
return setup()
else:
generateShellScript(shellScript) # no-op, overwrite old one
# Parse options
try:
shortopts = "hVcsadl"
longopts = ['help', 'version', 'cd', 'set', 'add-current',
'delete', 'list']
if sys.platform.startswith("win"):
shortopts += "o"
longopts.append("open")
optlist, args = getopt.getopt(argv[1:], shortopts, longopts)
except getopt.GetoptError as ex:
msg = ex.msg
if ex.opt in ('d', 'dump'):
msg += ": old -d|--dump option is now -l|--list"
sys.stderr.write("go: error: %s.\n" % msg)
sys.stderr.write("See 'go --help'.\n")
return 1
action = "cd"
for opt, optarg in optlist:
if opt in ('-h', '--help'):
sys.stdout.write(__doc__)
return 0
elif opt in ('-V', '--version'):
sys.stdout.write("go %s\n" % __version__)
return 0
elif opt in ('-c', '--cd'):
action = "cd"
elif opt in ('-s', '--set'):
action = "set"
elif opt in ('-a', '--add-current'):
action = "add"
elif opt in ('-d', '--delete'):
action = "delete"
elif opt in ('-l', '--list'):
action = "list"
elif opt in ("-o", "--open"):
action = "open"
# Parse arguments and do specified action.
if action == "add":
if len(args) != 1:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
name, value = args[0], os.getcwd()
try:
setShortcut(name, value)
except GoError as ex:
error(str(ex))
return 1
elif action == "delete":
if len(args) != 1:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
name, value = args[0], None
try:
setShortcut(name, value)
except GoError as ex:
error(str(ex))
return 1
elif action == "set":
if len(args) != 2:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
name, value = args
try:
setShortcut(name, value)
except GoError as ex:
error(str(ex))
return 1
elif action == "cd":
if len(args) != 1:
error("Incorrect number of arguments. argv: %s" % argv)
#error("Usage: go [options...] shortcut[/subpath]")
return 1
path = args[0]
if _subsystem == "console":
try:
generateShellScript(shellScript, path)
except KeyError as ex:
error("Unrecognized shortcut: '%s'" % str(ex))
return 1
except GoError as ex:
error(str(ex))
return 1
elif _subsystem == "windows" and sys.platform.startswith("win"):
try:
dir = resolvePath(path)
except GoError as ex:
error("Error resolving '%s': %s" % (path, ex))
return 1
try:
comspec = os.environ["COMSPEC"]
except KeyError:
error("Could not determine shell. No COMSPEC environment "
"variable.")
return 1
argv = [comspec, "/k", # Does command.com support '/k'?
"cd", "/D", '"%s"' % dir]
if os.path.basename(comspec).lower() == "cmd.exe":
argv += ["&&", "title", '%s' % dir]
os.spawnv(os.P_NOWAIT, comspec, argv)
else:
error("Internal error: subsystem is 'windows' and platform is "
"not win32")
return 1
elif action == "list":
if len(args) == 0:
printShortcuts(getShortcuts())
elif len(args) == 1:
pattern = args[0].lower()
shortcuts = getShortcuts()
s = {}
for name, value in shortcuts.items():
if name.lower().find(pattern) != -1:
s[name] = value
printShortcuts(s, "Matching '%s'" % pattern)
else:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
elif action == "open" and sys.platform.startswith("win"):
        if
in
dframe_2017[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2017.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2017 = dframe_2017[dframe_2017['date'] < end_date]
dframe_2017 = dframe_2017[dframe_2017['date'] > start_date]
dframe_2017.set_index('date', inplace=True)
dframe_2017.rename(columns={compound: f'{compound}_rf'}, inplace=True)
if end_year >= 2018:
dframe_2018 = pd.read_excel(r'Z:\Data\Summit_GC\Summit_GC_2018\NMHC_results\BH_STD_2018.xlsx',
header=None)
dframe_2018.set_index(0, inplace=True)
dframe_transposed = dframe_2018.T
dframe_2018 = dframe_transposed.loc[:, [compound]]
dframe_2018 = dframe_2018.iloc[:, [j for j, c in enumerate(dframe_2018.columns) if j not in [0, 2, 3]]]
dframe_2018['file'] = dframe_transposed.iloc[:, 0]
dframe_2018['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2018.dropna(inplace=True, subset=['file'])
dframe_2018['decmial_date_year'] = [(2018 + (float(row[0]) - 1) / 365) for row in
dframe_2018[['decimal_date']].values]
dframe_2018['Year'] = dframe_2018['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2018['Yearly_Day'] = dframe_2018['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2018['Hour'] = dframe_2018['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2018['Minute'] = dframe_2018['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2018['Second'] = dframe_2018['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2018, month=1, day=1)
dframe_2018['date'] = [
base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2018[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2018.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2018 = dframe_2018[dframe_2018['date'] < end_date]
dframe_2018 = dframe_2018[dframe_2018['date'] > start_date]
dframe_2018.set_index('date', inplace=True)
dframe_2018.rename(columns={compound: f'{compound}_rf'}, inplace=True)
if end_year >= 2019:
dframe_2019 = pd.read_excel(
r'C:\Users\ARL\Desktop\Summit_GC_2019\NMHC_results\BH_STD_2019.xlsx',
header=None)
dframe_2019.set_index(0, inplace=True)
dframe_transposed = dframe_2019.T
dframe_2019 = dframe_transposed.loc[:, [compound]]
dframe_2019 = dframe_2019.iloc[:,
[j for j, c in enumerate(dframe_2019.columns) if j not in [0, 2, 3]]]
dframe_2019['file'] = dframe_transposed.iloc[:, 0]
dframe_2019['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2019.dropna(inplace=True, subset=['file'])
dframe_2019['decmial_date_year'] = [(2019 + (float(row[0]) - 1) / 365) for row in
dframe_2019[['decimal_date']].values]
dframe_2019['Year'] = dframe_2019['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2019['Yearly_Day'] = dframe_2019['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2019['Hour'] = dframe_2019['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2019['Minute'] = dframe_2019['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2019['Second'] = dframe_2019['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2019, month=1, day=1)
dframe_2019['date'] = [
base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2019[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2019.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2019 = dframe_2019[dframe_2019['date'] < end_date]
dframe_2019 = dframe_2019[dframe_2019['date'] > start_date]
dframe_2019.set_index('date', inplace=True)
dframe_2019.rename(columns={compound: f'{compound}_rf'}, inplace=True)
elif start_year == 2018:
if end_year >= 2018:
dframe_2018 = pd.read_excel(r'Z:\Data\Summit_GC\Summit_GC_2018\NMHC_results\BH_STD_2018.xlsx',
header=None)
dframe_2018.set_index(0, inplace=True)
dframe_transposed = dframe_2018.T
dframe_2018 = dframe_transposed.loc[:, [compound]]
dframe_2018 = dframe_2018.iloc[:, [j for j, c in enumerate(dframe_2018.columns) if j not in [0, 2, 3]]]
dframe_2018['file'] = dframe_transposed.iloc[:, 0]
dframe_2018['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2018.dropna(inplace=True, subset=['file'])
dframe_2018['decmial_date_year'] = [(2018 + (float(row[0]) - 1) / 365) for row in
dframe_2018[['decimal_date']].values]
dframe_2018['Year'] = dframe_2018['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2018['Yearly_Day'] = dframe_2018['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2018['Hour'] = dframe_2018['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2018['Minute'] = dframe_2018['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2018['Second'] = dframe_2018['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2018, month=1, day=1)
dframe_2018['date'] = [base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2018[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2018.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2018 = dframe_2018[dframe_2018['date'] < end_date]
dframe_2018 = dframe_2018[dframe_2018['date'] > start_date]
dframe_2018.set_index('date', inplace=True)
dframe_2018.rename(columns={compound: f'{compound}_rf'}, inplace=True)
if end_year >= 2019:
dframe_2019 = pd.read_excel(r'C:\Users\ARL\Desktop\Summit_GC_2019\NMHC_results\BH_STD_2019.xlsx',
header=None)
dframe_2019.set_index(0, inplace=True)
dframe_transposed = dframe_2019.T
dframe_2019 = dframe_transposed.loc[:, [compound]]
dframe_2019 = dframe_2019.iloc[:, [j for j, c in enumerate(dframe_2019.columns) if j not in [0, 2, 3]]]
dframe_2019['file'] = dframe_transposed.iloc[:, 0]
dframe_2019['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2019.dropna(inplace=True, subset=['file'])
dframe_2019['decmial_date_year'] = [(2019 + (float(row[0]) - 1) / 365) for row in
dframe_2019[['decimal_date']].values]
dframe_2019['Year'] = dframe_2019['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2019['Yearly_Day'] = dframe_2019['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2019['Hour'] = dframe_2019['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2019['Minute'] = dframe_2019['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2019['Second'] = dframe_2019['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2019, month=1, day=1)
dframe_2019['date'] = [
base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2019[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2019.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2019 = dframe_2019[dframe_2019['date'] < end_date]
dframe_2019 = dframe_2019[dframe_2019['date'] > start_date]
dframe_2019.set_index('date', inplace=True)
dframe_2019.rename(columns={compound: f'{compound}_rf'}, inplace=True)
elif start_year == 2019:
dframe_2019 = pd.read_excel(r'C:\Users\ARL\Desktop\Summit_GC_2019\NMHC_results\BH_STD_2019.xlsx',
header=None)
dframe_2019.set_index(0, inplace=True)
dframe_transposed = dframe_2019.T
dframe_2019 = dframe_transposed.loc[:, [compound]]
dframe_2019 = dframe_2019.iloc[:, [j for j, c in enumerate(dframe_2019.columns) if j not in [0, 2, 3]]]
dframe_2019['file'] = dframe_transposed.iloc[:, 0]
dframe_2019['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2019.dropna(inplace=True, subset=['file'])
dframe_2019['decmial_date_year'] = [(2019 + (float(row[0]) - 1) / 365) for row in
dframe_2019[['decimal_date']].values]
dframe_2019['Year'] = dframe_2019['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2019['Yearly_Day'] = dframe_2019['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2019['Hour'] = dframe_2019['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2019['Minute'] = dframe_2019['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2019['Second'] = dframe_2019['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2019, month=1, day=1)
dframe_2019['date'] = [base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2019[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2019.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2019 = dframe_2019[dframe_2019['date'] < end_date]
dframe_2019 = dframe_2019[dframe_2019['date'] > start_date]
dframe_2019.set_index('date', inplace=True)
dframe_2019.rename(columns={compound: f'{compound}_rf'}, inplace=True)
dframe = pd.concat([dframe_2017, dframe_2018, dframe_2019])
dframe = dframe.loc[dframe.index < end_date]
dframe = dframe.loc[dframe.index > start_date]
dframe.fillna(value=99999, inplace=True)
return dframe
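# The year-specific blocks above all repeat the same steps: decode the run file
# name (formatted YYYYDDDHHMMSS) into a datetime, window the frame between
# start_date and end_date, and rename the compound column. A possible shared
# helper is sketched below; the function name and signature are mine, not part
# of the original module.
def _index_by_filename_date(dframe, year, compound, start_date, end_date):
    """Parse the YYYYDDDHHMMSS 'file' column into a datetime index (sketch)."""
    base_date = datetime(year=year, month=1, day=1)
    dframe['date'] = [base_date + timedelta(days=int(str(f)[4:7]) - 1,
                                            hours=int(str(f)[7:9]),
                                            minutes=int(str(f)[9:11]),
                                            seconds=int(str(f)[11:13]))
                      for f in dframe['file']]
    dframe = dframe[(dframe['date'] > start_date) & (dframe['date'] < end_date)]
    dframe = dframe.drop(columns=['file']).set_index('date')
    return dframe.rename(columns={compound: f'{compound}_rf'})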
def excel_rf_Brad6(compound, start, end):
start_date = datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
year = start_date.year
dframe = pd.read_excel(r'C:\Users\ARL\Desktop\Summit_GC_2019\NMHC_results\Brad6_STD_2019.xlsx', header=None,
sheet_name='BA 2019 data')
dframe.set_index(0, inplace=True)
dframe_transposed = dframe.T
dframe = dframe_transposed.loc[:, [compound]]
dframe = dframe.iloc[:, [j for j, c in enumerate(dframe.columns) if j not in [0, 2, 3]]]
dframe['file'] = dframe_transposed.iloc[:, 0]
dframe['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe['decmial_date_year'] = [(2019 + (float(row[0]) - 1) / 365) for row in
dframe[['decimal_date']].values]
dframe.dropna(inplace=True, subset=['file'])
dframe['Year'] = dframe['file'].apply(lambda x: int(str(x)[0:4]))
dframe['Yearly_Day'] = dframe['file'].apply(lambda x: int(str(x)[4:7]))
dframe['Hour'] = dframe['file'].apply(lambda x: int(str(x)[7:9]))
dframe['Minute'] = dframe['file'].apply(lambda x: int(str(x)[9:11]))
dframe['Second'] = dframe['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=year, month=1, day=1)
dframe['date'] = [base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe = dframe[dframe['date'] < end_date]
dframe = dframe[dframe['date'] > start_date]
dframe.fillna(value=99999, inplace=True)
dframe.set_index('date', inplace=True)
dframe.rename(columns={compound: f'{compound}_rf'}, inplace=True)
return dframe
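# Example call (illustrative): the compound name below is an assumption and must
# match a row label in the first column of the Brad6 spreadsheet.
def _example_excel_rf_Brad6():
    rf_frame = excel_rf_Brad6('ethane', '2019-01-01 00:00:00',
                              '2019-06-01 00:00:00')
    return rf_frame[['ethane_rf']].head()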
def excel_rf_BA(compound, start, end):
start_date = datetime.strptime(start, '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
start_year = int(start_date.year)
end_year = int(end_date.year)
dframe_2017 = pd.DataFrame()
dframe_2018 = pd.DataFrame()
dframe_2019 = pd.DataFrame()
if start_year == 2017:
if end_year >= 2017:
dframe_2017 = pd.read_excel(r'Z:\Data\Summit_GC\Summit_GC_2017\NMHC_results\BA_STD_2017.xlsx',
sheet_name='BA 2017 data',
header=None)
dframe_2017.set_index(0, inplace=True)
dframe_transposed = dframe_2017.T
dframe_2017 = dframe_transposed.loc[:, [compound]]
dframe_2017 = dframe_2017.iloc[:, [j for j, c in enumerate(dframe_2017.columns) if j not in [0, 2, 3]]]
dframe_2017['file'] = dframe_transposed.iloc[:, 0]
dframe_2017['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2017.dropna(inplace=True, subset=['file'])
dframe_2017['decmial_date_year'] = [(2017 + (float(row[0]) - 1) / 365) for row in
dframe_2017[['decimal_date']].values]
dframe_2017['Year'] = dframe_2017['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2017['Yearly_Day'] = dframe_2017['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2017['Hour'] = dframe_2017['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2017['Minute'] = dframe_2017['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2017['Second'] = dframe_2017['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2017, month=1, day=1)
dframe_2017['date'] = [base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2017[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2017.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2017 = dframe_2017[dframe_2017['date'] < end_date]
dframe_2017 = dframe_2017[dframe_2017['date'] > start_date]
dframe_2017.set_index('date', inplace=True)
dframe_2017.rename(columns={compound: f'{compound}_rf'}, inplace=True)
if end_year >= 2018:
dframe_2018 = pd.read_excel(r'Z:\Data\Summit_GC\Summit_GC_2018\NMHC_results\BA_STD_2018.xlsx',
header=None, sheet_name='BA 2018 data')
dframe_2018.set_index(0, inplace=True)
dframe_transposed = dframe_2018.T
dframe_2018 = dframe_transposed.loc[:, [compound]]
dframe_2018 = dframe_2018.iloc[:, [j for j, c in enumerate(dframe_2018.columns) if j not in [0, 2, 3]]]
dframe_2018['file'] = dframe_transposed.iloc[:, 0]
dframe_2018['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2018.dropna(inplace=True, subset=['file'])
dframe_2018['decmial_date_year'] = [(2018 + (float(row[0]) - 1) / 365) for row in
dframe_2018[['decimal_date']].values]
dframe_2018['Year'] = dframe_2018['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2018['Yearly_Day'] = dframe_2018['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2018['Hour'] = dframe_2018['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2018['Minute'] = dframe_2018['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2018['Second'] = dframe_2018['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2018, month=1, day=1)
dframe_2018['date'] = [
base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2018[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2018.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2018 = dframe_2018[dframe_2018['date'] < end_date]
dframe_2018 = dframe_2018[dframe_2018['date'] > start_date]
dframe_2018.set_index('date', inplace=True)
dframe_2018.rename(columns={compound: f'{compound}_rf'}, inplace=True)
if end_year >= 2019:
dframe_2019 = pd.read_excel(r'Z:\Data\Summit_GC\Summit_GC_2019\NMHC_results\BA_STD_2019.xlsx',
header=None, sheet_name='BA 2019 data')
dframe_2019.set_index(0, inplace=True)
dframe_transposed = dframe_2019.T
dframe_2019 = dframe_transposed.loc[:, [compound]]
dframe_2019 = dframe_2019.iloc[:,
[j for j, c in enumerate(dframe_2019.columns) if j not in [0, 2, 3]]]
dframe_2019['file'] = dframe_transposed.iloc[:, 0]
dframe_2019['decimal_date'] = dframe_transposed.iloc[:, 39]
dframe_2019.dropna(inplace=True, subset=['file'])
dframe_2019['decmial_date_year'] = [(2019 + (float(row[0]) - 1) / 365) for row in
dframe_2019[['decimal_date']].values]
dframe_2019['Year'] = dframe_2019['file'].apply(lambda x: int(str(x)[0:4]))
dframe_2019['Yearly_Day'] = dframe_2019['file'].apply(lambda x: int(str(x)[4:7]))
dframe_2019['Hour'] = dframe_2019['file'].apply(lambda x: int(str(x)[7:9]))
dframe_2019['Minute'] = dframe_2019['file'].apply(lambda x: int(str(x)[9:11]))
dframe_2019['Second'] = dframe_2019['file'].apply(lambda x: int(str(x)[11:13]))
base_date = datetime(year=2019, month=1, day=1)
dframe_2019['date'] = [
base_date + timedelta(days=int(row[0] - 1), hours=int(row[1]), minutes=int(row[2]),
seconds=int(row[3])
) for row in
dframe_2019[[
'Yearly_Day',
'Hour',
'Minute',
'Second']].values]
dframe_2019.drop(columns=['file', 'Year', 'Hour', 'Minute', 'Yearly_Day', 'Second'], inplace=True)
dframe_2019 = dframe_2019[dframe_2019['date'] < end_date]
dframe_2019 = dframe_2019[dframe_2019['date'] > start_date]
dframe_2019.set_index('date', inplace=True)
dframe_2019.rename(columns={compound: f'{compound}_rf'}, inplace=True)
elif start_year == 2018:
if end_year >= 2018:
dframe_2018 = pd.read_excel(r'Z:\Data\Summit_GC\Summit_GC_2018\NMHC_results\BA_STD_2018.xlsx',
header=None, sheet_name='BA 2018 data')
dframe_2018.set_index(0, inplace=True)
dframe_transposed = dframe_2018.T
# -*- coding: utf-8 -*-
import os
import sys
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError, ClientError, ParamError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.gse.v20191112 import gse_client as gse_client_v20191112
from tencentcloud.gse.v20191112 import models as models_v20191112
from jmespath import search
import time
from tccli import six
def doUpdateBucketCORSOpt(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateBucketCORSOptRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UpdateBucketCORSOpt(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
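# Every generated do* action in this module repeats the same polling loop around
# its API call. A possible shared helper is sketched below (my own refactoring,
# not part of the generated tccli code); `call` is any zero-argument callable
# that performs the request and returns the SDK response object, e.g.
# `_poll_until_waiter_matches(lambda: client.DeleteFleet(model), g_param)`.
def _poll_until_waiter_matches(call, g_param):
    """Invoke `call` until the JMESPath waiter condition in g_param is met (sketch)."""
    start_time = time.time()
    while True:
        rsp = call()
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        waiter = g_param.get('OptionsDefine.WaiterInfo')
        if not g_param[OptionsDefine.Waiter] or search(waiter['expr'], json_obj) == waiter['to']:
            return json_obj
        if time.time() - start_time >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout' %
                              (waiter['expr'], waiter['to']))
        print('Inquiry result is %s.' % search(waiter['expr'], json_obj))
        time.sleep(waiter['interval'])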
def doDeleteFleet(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteFleetRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeleteFleet(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateFleetCapacity(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateFleetCapacityRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UpdateFleetCapacity(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeFleetRelatedResources(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeFleetRelatedResourcesRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeFleetRelatedResources(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRuntimeConfiguration(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeRuntimeConfigurationRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeRuntimeConfiguration(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateAlias(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateAliasRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UpdateAlias(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeGameServerSessionPlacement(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeGameServerSessionPlacementRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeGameServerSessionPlacement(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeGameServerSessionDetails(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeGameServerSessionDetailsRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeGameServerSessionDetails(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCcnInstances(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.GseClient(cred, g_param[OptionsDefine.Region], profile)
{'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EdgeOrder/listConfigurations'} # type: ignore
def list_product_families_metadata(
self,
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ProductFamiliesMetadata"]
"""This method provides the list of product families metadata for the given subscription.
:param skip_token: $skipToken is supported on list of product families metadata, which provides
the next page in the list of product families metadata.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProductFamiliesMetadata or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.edgeorder.v2020_12_01_preview.models.ProductFamiliesMetadata]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProductFamiliesMetadata"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_product_families_metadata.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ProductFamiliesMetadata', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_product_families_metadata.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EdgeOrder/productFamiliesMetadata'} # type: ignore
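    # Usage sketch (illustrative, not taken from the SDK docs): the method above
    # returns an ItemPaged iterator, so paging is transparent to the caller.
    #
    #     client = EdgeOrderManagementClient(credential, subscription_id)  # assumed client class name
    #     for metadata in client.<this operations group>.list_product_families_metadata():
    #         print(metadata)
    #
    # Each iteration issues the POST for the next page when the current page is
    # exhausted; `skip_token` only needs to be supplied to resume from a
    # previously returned continuation point.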
def list_order_at_subscription_level(
self,
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OrderResourceList"]
"""Lists order at subscription level.
:param skip_token: $skipToken is supported on Get list of order, which provides the next page
in the list of order.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OrderResourceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.edgeorder.v2020_12_01_preview.models.OrderResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OrderResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_order_at_subscription_level.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OrderResourceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_order_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EdgeOrder/orders'} # type: ignore
def list_order_items_at_subscription_level(
self,
filter=None, # type: Optional[str]
expand=None, # type: Optional[str]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OrderItemResourceList"]
"""Lists order item at subscription level.
:param filter: $filter is supported to filter based on order id. Filter supports only equals
operation.
:type filter: str
:param expand: $expand is supported on device details, forward shipping details and reverse
shipping details parameters. Each of these can be provided as a comma separated list. Device
Details for order item provides details on the devices of the product, Forward and Reverse
Shipping details provide forward and reverse shipping details respectively.
:type expand: str
:param skip_token: $skipToken is supported on Get list of order items, which provides the next
page in the list of order items.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OrderItemResourceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.edgeorder.v2020_12_01_preview.models.OrderItemResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OrderItemResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_order_items_at_subscription_level.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OrderItemResourceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_order_items_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EdgeOrder/orderItems'} # type: ignore
def list_addresses_at_resource_group_level(
self,
resource_group_name, # type: str
filter=None, # type: Optional[str]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AddressResourceList"]
"""Lists all the addresses available under the given resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param filter: $filter is supported to filter based on shipping address properties. Filter
supports only equals operation.
:type filter: str
:param skip_token: $skipToken is supported on Get list of addresses, which provides the next
page in the list of address.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AddressResourceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.edgeorder.v2020_12_01_preview.models.AddressResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AddressResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_addresses_at_resource_group_level.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AddressResourceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_addresses_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EdgeOrder/addresses'} # type: ignore
def get_address_by_name(
self,
address_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AddressResource"
"""Gets information about the specified address.
:param address_name: The name of the address Resource within the specified resource group.
address names must be between 3 and 24 characters in length and use any alphanumeric and
underscore only.
:type address_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
variance_level (float): vertical line at % variance explained
Returns:
Nothing.
"""
plt.figure()
plt.plot(np.arange(1, len(variance_explained) + 1), variance_explained, '--k', label='Variance Explained')
        # plot vertical line where variance explained first exceeds variance_level
        # (np.argmax returns a 0-based index; +1 converts it to a component count)
        PC = np.argmax(variance_explained > variance_level) + 1
        plt.vlines(PC, variance_explained.min(),
                   variance_explained.max(), colors='r',
                   label=f"{variance_level} % Variance Explained with first {PC} components")
# add label with variance_level and PC at that level
plt.legend()
plt.xlabel('Number of components')
plt.ylabel('Variance explained')
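    # Usage sketch (illustrative; assumes scikit-learn is available and that this
    # method is called on an instance of this plotting class, here named `pca_plots`;
    # the method name placeholder below stands for the method defined above):
    #
    #     from sklearn.decomposition import PCA
    #     pca = PCA().fit(features)                        # features: trials x features
    #     cumulative = np.cumsum(pca.explained_variance_ratio_) * 100
    #     pca_plots.<this method>(cumulative, variance_level=90)
    #
    # `variance_explained` is expected as a cumulative percentage curve, so the
    # vertical line marks the first component count exceeding `variance_level`.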
def plot_pca_scatter(self, scores, pc1, pc2, labels, print_labels=False, facecolor='r', edgecolor='r', fig=None, ax=None):
"""2d scatter plot with ability to specify colour and shape of point and plot trial number for each point
Args:
            scores (ndarray): values for features in the new PC basis
            pc1 (int): principal component vector for the x axis
            pc2 (int): principal component vector for the y axis
            labels (any): labels to plot for data points (must be same length as selected scores)
            print_labels (bool, optional): print a label next to each point. Defaults to False.
            facecolor (str, optional): marker face colour. Defaults to 'r'.
            edgecolor (str, optional): marker edge colour; if face and edge colour are the same the point appears filled. Defaults to 'r'.
            fig (matplotlib figure, optional): figure to plot into; created if not passed. Defaults to None.
            ax (matplotlib axis, optional): axis to plot into; created if not passed. Defaults to None.
        Returns:
            fig, ax: the figure and axis containing the scatter plot
"""
if fig is None:
fig, ax = plt.subplots()
# get data
xs = scores[:,pc1]
ys = scores[:,pc2]
# plot each trial
ax.scatter(xs, ys, facecolors=facecolor, edgecolors=edgecolor)
# add labels and title
plt.xlabel(f"Principal component {pc1}")
plt.ylabel(f"Principal component {pc2}")
plt.title("Trials for two principal components")
# add datapoints
if print_labels:
            # zip joins x and y coordinates and labels in pairs
for x,y,label in zip(xs,ys,labels):
# this method is called for each point
plt.annotate(label, # this is the text
(x,y), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,5), # distance from text to points (x,y)
size=5,
ha='center') # horizontal alignment can be left, right or center
return fig, ax
def plot_pca_scatter_animated(self, scores, pc1, pc2, fig=None, ax=None, line_col='grey', line_width=1,
maker_shape='o', marker_col='red', marker_facecol='red', marker_size=4):
"""animate 2d scatter plot
Args:
            scores (ndarray): values for features in the new PC basis
            pc1 (int): principal component vector for the x axis
            pc2 (int): principal component vector for the y axis
            fig (matplotlib figure, optional): figure to plot into; created if not passed. Defaults to None.
            ax (matplotlib axis, optional): axis to plot into; created if not passed. Defaults to None.
            line_col (str, optional): colour of the connecting line. Defaults to 'grey'.
            line_width (int, optional): width of the connecting line. Defaults to 1.
            maker_shape (str, optional): marker shape for the data points. Defaults to 'o'.
            marker_col (str, optional): marker edge colour. Defaults to 'red'.
            marker_facecol (str, optional): marker face colour. Defaults to 'red'.
            marker_size (int, optional): marker size. Defaults to 4.
Returns:
fig (matplotlib subplot figure)
ax (matplotlib subplot axis)
            anim (matplotlib animation): FuncAnimation object; keep a reference to it so it is not garbage-collected before rendering
"""
        # initialize figure
if fig is None:
fig, ax = plt.subplots()
# specify data
xs = scores[:,pc1]
ys = scores[:,pc2]
# set axis max min
ax.set_xlim([np.min(xs)-50, np.max(xs)+50])
ax.set_ylim([np.min(ys)-50, np.max(ys)+50])
# add labels and title
ax.set_xlabel(f"Principal component {pc1}")
ax.set_ylabel(f"Principal component {pc2}")
ax.set_title("Trials for two principal components")
# init plot
graph, = ax.plot([] , [], zorder=1, #plot data
color=line_col, linewidth=line_width, #set line params
marker=maker_shape, markeredgecolor=marker_col, markerfacecolor=marker_facecol, markersize=marker_size) #set dots
# define animation function
def update(i, xs, ys, graph):
graph.set_data(xs[:i], ys[:i])
            return graph,  # blit=True expects an iterable of artists
# animation
anim = animation.FuncAnimation(fig, update, len(xs), fargs=[xs, ys, graph], blit=True, interval=80, repeat=False)
return fig, ax, anim
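    # Note on using the returned animation (illustrative): a reference to `anim`
    # must be kept alive or matplotlib discards it before it renders, and it can
    # be written to disk with the standard writers, e.g.:
    #
    #     fig, ax, anim = pca_plots.plot_pca_scatter_animated(scores, 0, 1)  # `pca_plots`: an instance of this class
    #     anim.save('pca_trajectory.gif', writer='pillow', fps=12)
    #     plt.show()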
# ===============================================================================================================================================
class eda():
"""[library to perfrom exploratory data analyis on spikes]
"""
def __init__(self, data, session, main_folder, params):
"""[initialize session analyse object]
Args:
data ([toubple]): [(spikes_df, clusters_df, trials_df)]
session ([string]): [session folder]
main_folder ([type]): [description]
params ([dict]): [dictionary with all necessary infromation
params['sampling_rate] ([int]): [sampling rate for spikes]
]
"""
self.folder = main_folder
self.spikes_df, self.clusters_df, self.trials_df = data
self.session = session
# load all parameters
if 'sampling_rate' in params:
self.sampling_rate = params['sampling_rate']
else:
self.sampling_rate = 20000
# Helper Functions EDA =================================================================================================
# find spikes between
def get_spikes_for_trial(self, array, start, stop):
'''
params: array = numpy array (N,1) with values to check against
start, stop = value to find all values in array between
return: values in array between start and stop, re-referenced to start
'''
ar = array[np.logical_and(array >= start, array <= stop)]
if ar.size > 0:
ar = ar[:] - start
return ar
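# Example (illustrative; `an` is a hypothetical eda instance and the numbers are made
# up): spikes at 250 and 900 fall inside the trial window [200, 1000] and are
# re-referenced to the trial start.
#
#   an.get_spikes_for_trial(np.array([10, 250, 900, 1500]), start=200, stop=1000)
#   # -> array([ 50, 700])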
# Plotting ==========================================================================================================
# event plot for all spikes
def plt_spike_train(self, cluster, spikes, trials_df, params=dict()):
"""[generate spike event plot for given spikes and given trial start and end]
Args:
cluster ([int]): [selected neuron]
spikes ([numpy array]): [spikes for neuron to plot]
trials_df ([pandas data frame]): [format: index=trial, col1=start of trial, col2=stop of trial]
params ([dict]): [optional, default = empty, params['brain_region' = brain region of cluster] ]
Returns:
ax, fig: matplotlib axis and figure containing the event plot
"""
# initialize plot
fig, ax = plt.subplots()
# initialize list with spikes per trial
spikes_trials = []
# get spikes for each trial
for row in trials_df.index:
start = trials_df.iloc[row, 0]
stop = trials_df.iloc[row, 1]
spk = self.get_spikes_for_trial(spikes, start, stop)
#if len(spk)>0:
spikes_trials.append(spk)
# plot spikes
ax.eventplot(spikes_trials, color=".2")
# set title and axis labels
if 'brain_region' in params:
ax.set_title(f"Spikes for Cluster {cluster}, Brain Region: {params['brain_region']}")
else:
ax.set_title(f"Spikes for Cluster {cluster}")
ax.set_xlabel(f"Sampling Points [{self.sampling_rate/1000}kHz]")
ax.set_ylabel('Trial')
index = trials_df.index[0::10]
ax.set_yticks(index - index[0])
ax.set_yticklabels(index)
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
return ax, fig
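# Usage sketch (illustrative; dataframe and column names are assumptions):
#
#   an = eda((spikes_df, clusters_df, trials_df), session='sess01',
#            main_folder='/data', params={'sampling_rate': 20000})
#   spikes = spikes_df.loc[spikes_df['cluster'] == 12, 'spike_time'].values
#   ax, fig = an.plt_spike_train(12, spikes, trials_df[['start', 'stop']],
#                                params={'brain_region': 'ALM'})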
# plot spike trains around an event with histograms over spike time and trial
def plt_spike_train_hist_bar(self, cluster, event, window, trials_df, spikes_ar, fig=None, ax=[None, None, None], title=None):
"""[summary]
Args:
cluster ([int]): [neuron to plot]
event ([string]): [event name]
window ([int]): [window to plot spikes: event_time-window < spikes < event_time+window]
trials_df ([dataframe]): [event times to plot for, must be only column in dataframe]
spikes_ar ([numpy array]): [array with all spikes of neuron to plot]
fig ([plt.subplots figure], optional): figure to draw on; created if None. Defaults to None.
ax (list of plt.subplots axes, optional): [ax1, ax2, ax3] to draw on; created if None. Defaults to [None, None, None].
title ([string], optional): figure title. Defaults to None.
Returns:
fig [plt.subplots figure]: figure with the event plot and histograms
(ax1, ax2, ax3) [plt.subplots axes]: event plot axis, spike-time histogram axis, per-trial spike-count axis
"""
# create fig, gird and axis ===============
if any(i is None for i in ax) or fig is None:
#create figure with shape
fig = plt.figure(figsize=(6,5))
# create gridspecs
gs = fig.add_gridspec(2, 3, hspace=0, wspace=0)
# create axis for hist spike train
ax1 = fig.add_subplot(gs[0, :2])
ax2 = fig.add_subplot(gs[1, :2])
ax2.get_shared_x_axes().join(ax1, ax2)
# create axis for trial hist
ax3 = fig.add_subplot(gs[0, 2])
ax3.get_shared_y_axes().join(ax1, ax3)
else:
ax1, ax2, ax3 = ax
# loop that iterates through all trials
y = 0
# array to store spike count for each trial
hist_tr = np.empty([0,])
# list to store spikes per trial for the eventplot and histogram
#spk_ar = np.empty((len(trials_df),1), dtype=object)
spk_ls = []
##spike train plot ========================
# main loop over each trial
for row in trials_df.index:
# derive spike times in range delta around event time for trial
ar = spikes_ar[( ( spikes_ar >= (trials_df[row] - window) ) & ( spikes_ar <= (trials_df[row] + window) ) )]
ar = ar - trials_df[row]
# add spike count to hist_tr for this trial
hist_tr = np.append(hist_tr, ar.size)
# add to histogram array
spk_ls.append(ar.flatten().tolist())
# plot eventplot
ax1.eventplot(spk_ls, color=".2")
## draw red line at event
ax1.axvline(x=0,ymin=0,ymax=1,c="red",linewidth=0.5)
# spike train y label
ax1.set_ylabel('Trial')
## set y axis 1. plot
# set ticks
step = trials_df.index.size/5
start = 0
stop = trials_df.index.size+step/2
ax1.set_yticks(np.arange(start, stop, step).astype(int))
# set tick labels
stop = trials_df.index.size
label = trials_df.index.values[np.arange(start, stop, step).astype(int)]
label = np.append(label, trials_df.index.values[-1])
ax1.set_yticklabels(label)
# set y limits 1. plot
ax1.set_ylim([0, stop])
##labels
# turn x ticks inward
ax1.tick_params(axis="x",direction="in")
# turn off labels on the shared x axis, keep only ticks
plt.setp(ax1.get_xticklabels(), visible=False)
# write event
ax1.set_title(event, color='red', fontsize=8)
## plot histogram spikes ===========================
num_bins = 60
# flatten list of spikes for histogram
flattened = [val for sublist in spk_ls for val in sublist]
# draw histogram
ax2.hist(flattened, bins=num_bins, color="tab:blue")
# draw red line at event
ax2.axvline(x=0,ymin=0,ymax=1,c="red",linewidth=0.5)
# naming y axis
ax2.set_ylabel('Spike Count')
# set x ticks
step = window/4
start = -window
stop = window+(step/2)
x_ticks = np.arange(start, stop, step)
ax2.set_xticks(x_ticks)
# set x ticks labels to seconds
# set x limits
ax2.set_xlim([-window, window])
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileCopyrightText: © 2010 by California Institute of Technology.
#
# statefbk.py - tools for state feedback control
#
# Author: <NAME>, <NAME>
# Date: 31 May 2010
#
# This file contains routines for designing state space controllers
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
"""
"""
import numpy as np
from . import statesp
from .mateqn import care
from .statesp import _ssmatrix
from .exception import ControlSlycot, ControlArgument, ControlDimension
# Make sure we have access to the right slycot routines
try:
from slycot import sb03md57
# wrap without the deprecation warning
def sb03md(n, C, A, U, dico, job="X", fact="N", trana="N", ldwork=None):
ret = sb03md57(A, U, C, dico, job, fact, trana, ldwork)
return ret[2:]
except ImportError:
try:
from slycot import sb03md
except ImportError:
sb03md = None
try:
from slycot import sb03od
except ImportError:
sb03od = None
__all__ = ["ctrb", "obsv", "gram", "place", "place_varga", "lqr", "lqe", "acker"]
# Pole placement
def place(A, B, p):
"""Place closed loop eigenvalues
K = place(A, B, p)
Parameters
----------
A : 2D array_like
Dynamics matrix
B : 2D array_like
Input matrix
p : 1D array_like
Desired eigenvalue locations
Returns
-------
K : 2D array (or matrix)
Gain such that A - B K has eigenvalues given in p
Notes
-----
Algorithm
This is a wrapper function for :func:`scipy.signal.place_poles`, which
implements the Tits and Yang algorithm [1]_. It will handle SISO,
MISO, and MIMO systems. If you want more control over the algorithm,
use :func:`scipy.signal.place_poles` directly.
Limitations
The algorithm will not place poles at the same location more
than rank(B) times.
The return type for 2D arrays depends on the default class set for
state space operations. See :func:`~control.use_numpy_matrix`.
References
----------
.. [1] <NAME> and <NAME>, "Globally convergent algorithms for robust
pole assignment by state feedback," IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
>>> A = [[-1, -1], [0, 1]]
>>> B = [[0], [1]]
>>> K = place(A, B, [-2, -5])
See Also
--------
place_varga, acker
"""
from scipy.signal import place_poles
# Convert the system inputs to NumPy arrays
A_mat = np.array(A)
B_mat = np.array(B)
if A_mat.shape[0] != A_mat.shape[1]:
raise ControlDimension("A must be a square matrix")
if A_mat.shape[0] != B_mat.shape[0]:
err_str = "The number of rows of A must equal the number of rows in B"
raise ControlDimension(err_str)
# Convert desired poles to numpy array
placed_eigs = np.atleast_1d(np.squeeze(np.asarray(p)))
result = place_poles(A_mat, B_mat, placed_eigs, method="YT")
K = result.gain_matrix
return _ssmatrix(K)
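# Quick check (illustrative, not part of the library): the closed-loop matrix A - B*K
# should have the requested eigenvalues; only numpy and place() above are used.
#
#   import numpy as np
#   A = np.array([[-1., -1.], [0., 1.]])
#   B = np.array([[0.], [1.]])
#   K = place(A, B, [-2., -5.])
#   np.sort(np.linalg.eigvals(A - B @ K))              # approx. [-5., -2.]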
def place_varga(A, B, p, dtime=False, alpha=None):
"""Place closed loop eigenvalues
K = place_varga(A, B, p, dtime=False, alpha=None)
Required Parameters
----------
A : 2D array_like
Dynamics matrix
B : 2D array_like
Input matrix
p : 1D array_like
Desired eigenvalue locations
Optional Parameters
---------------
dtime : bool
False for continuous time pole placement or True for discrete time.
The default is dtime=False.
alpha : double scalar
If `dtime` is false then place_varga will leave the eigenvalues with
real part less than alpha untouched. If `dtime` is true then
place_varga will leave eigenvalues with modulus less than alpha
untouched.
By default (alpha=None), place_varga computes alpha such that all
poles will be placed.
Returns
-------
K : 2D array (or matrix)
Gain such that A - B K has eigenvalues given in p.
Algorithm
---------
This function is a wrapper for the slycot function sb01bd, which
implements the pole placement algorithm of Varga [1]. In contrast to the
algorithm used by place(), the Varga algorithm can place multiple poles at
the same location. The placement, however, may not be as robust.
[1] <NAME>. "A Schur method for pole assignment." IEEE Trans. Automatic
Control, Vol. AC-26, pp. 517-519, 1981.
Notes
-----
The return type for 2D arrays depends on the default class set for
state space operations. See :func:`~control.use_numpy_matrix`.
Examples
--------
>>> A = [[-1, -1], [0, 1]]
>>> B = [[0], [1]]
>>> K = place_varga(A, B, [-2, -5])
See Also:
--------
place, acker
"""
# Make sure that SLICOT is installed
try:
from slycot import sb01bd
except ImportError:
raise ControlSlycot("can't find slycot module 'sb01bd'")
# Convert the system inputs to NumPy arrays
A_mat = np.array(A)
B_mat = np.array(B)
if A_mat.shape[0] != A_mat.shape[1] or A_mat.shape[0] != B_mat.shape[0]:
raise ControlDimension("matrix dimensions are incorrect")
# Compute the system eigenvalues and convert poles to numpy array
system_eigs = np.linalg.eig(A_mat)[0]
placed_eigs = np.atleast_1d(np.squeeze(np.asarray(p)))
# Need a character parameter for SB01BD
if dtime:
DICO = "D"
else:
DICO = "C"
if alpha is None:
# SB01BD ignores eigenvalues with real part less than alpha
# (if DICO='C') or with modulus less than alpha
# (if DICO = 'D').
if dtime:
# For discrete time, slycot only cares about modulus, so just make
# alpha the smallest it can be.
alpha = 0.0
else:
# Choosing alpha=min_eig is insufficient and can lead to an
# error or not having all the eigenvalues placed that we wanted.
# Evidently, what python thinks are the eigs is not precisely
# the same as what slicot thinks are the eigs. So we need some
# numerical breathing room. The following is pretty heuristic,
# but does the trick
alpha = -2 * abs(min(system_eigs.real))
elif dtime and alpha < 0.0:
raise ValueError("Discrete time systems require alpha > 0")
# Call SLICOT routine to place the eigenvalues
A_z, w, nfp, nap, nup, F, Z = sb01bd(
B_mat.shape[0],
B_mat.shape[1],
len(placed_eigs),
alpha,
A_mat,
B_mat,
placed_eigs,
DICO,
)
# Return the gain matrix, with MATLAB gain convention
return _ssmatrix(-F)
# contributed by <NAME> <<EMAIL>>
def lqe(A, G, C, QN, RN, NN=None):
"""lqe(A, G, C, QN, RN, [, N])
Linear quadratic estimator design (Kalman filter) for continuous-time
systems. Given the system
.. math::
x &= Ax + Bu + Gw \\\\
y &= Cx + Du + v
with unbiased process noise w and measurement noise v with covariances
.. math:: E{ww'} = QN, E{vv'} = RN, E{wv'} = NN
The lqe() function computes the observer gain matrix L such that the
stationary (non-time-varying) Kalman filter
.. math:: x_e = A x_e + B u + L(y - C x_e - D u)
produces a state estimate x_e that minimizes the expected squared
error using the sensor measurements y. The noise cross-correlation `NN`
is set to zero when omitted.
Parameters
----------
A, G, C : 2D array_like
Dynamics, process noise input, and output matrices
QN, RN : 2D array_like
Process and sensor noise covariance matrices
NN : 2D array, optional
Cross covariance matrix
Returns
-------
L : 2D array (or
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Toxine project: Text Preprocessing pipeline
#
# Copyright (C) 2019-present by <NAME>
# License: BSD, see LICENSE for details
"""
Tag emojis, emails, dates, phones, urls, html/xml fragments etc.
Tag tokens containing disallowed symbols.
Punctuation correction.
Tokenization.
"""
import datetime
from collections import OrderedDict
from functools import reduce
from html import unescape
from nltk import sent_tokenize as nltk_sent_tokenize, \
word_tokenize as nltk_word_tokenize
from pymorphy2 import MorphAnalyzer
from re import compile as re_compile, findall as re_findall, \
match as re_match, search as re_search, split as re_split, \
sub as re_sub
import sys
import uuid
from corpuscula import Conllu, CorpusDict
from corpuscula.utils import LOG_FILE, print_progress
word_is_known = MorphAnalyzer().word_is_known
class TextPreprocessor:
def __init__(self, cdict_restore_from=None, cdict_corpus=None,
cdict_backup_to=None):
"""Init all internal constants.
Run it before using any other function from the package.
:param cdict_restore_from:
:param cdict_corpus:
:param cdict_backup_to:
Params for CorpusDict's constructor.
"""
self._cdict = CorpusDict(
restore_from=cdict_restore_from, corpus=cdict_corpus,
backup_to=cdict_backup_to
)
if self._cdict.isempty():
self.wform_isknown = word_is_known
else:
self.wform_isknown = \
lambda x: self._cdict.wform_isknown(x) or word_is_known(x)
self._corpus = OrderedDict()
self.TAG_MASKS = {}
self.CHAR_DELIM = '|'
re_char_delim = '\\' + self.CHAR_DELIM
self.CHARS_PUNCT_ASIS = '+.,:;!?-'
self.CHARS_PUNCT = '()/"\'«»“”„‟' + self.CHARS_PUNCT_ASIS
self.CHARS_CURRENCY = '$¢£¤¥Ұ' \
+ ''.join(chr(x) for x in range(0x20a0, 0x20d0)) # '₠₡₢₣₤₥₦₧₨₩₪₫€₭₮₯₰₱₲₳₴₵₶₷₸₹₺₻₼₽₾₿...'
self.CHARS_ALLOWED = '_%&~№0-9A-Za-zЁА-Яёа-я`’²³°' \
+ self.CHARS_CURRENCY + self.CHARS_PUNCT #+ 'єіїўқҳ'
self.CHARS_CAPITAL = ''.join(chr(i) for i in range(2**16)
if chr(i).istitle()
and chr(i).isalpha())
self.CHARS_REGULAR = ''.join(
chr(i) for i in range(2**16) if chr(i) not in self.CHARS_CAPITAL
and chr(i).isalpha()
)
self._CAPS = '[' + self.CHARS_CAPITAL + ']'
self._NOCA = '[^' + self.CHARS_CAPITAL + ']'
self._NOCASP = '[^' + self.CHARS_CAPITAL + '\s]'
self._REGU = '[' + self.CHARS_REGULAR + ']'
self._CARE = self._CAPS + self._REGU
self.RE_LF = re_compile(r'([' + self.CHARS_PUNCT_ASIS + '])\n+')
self.RE_LF2 = re_compile(r'([^' + self.CHARS_PUNCT_ASIS + '])\n+\s*('
+ self._NOCA + r')')
char_alpha = r'A-Za-zЁА-Яёа-я'
char_alnum = r'0-9' + char_alpha
char_alnum_ = char_alnum + '_'
self.CHAR_NONALPHA = '[^' + char_alpha + ']'
self.CHAR_ALPHA = '[' + char_alpha + ']'
self.CHAR_NONALNUM = '[^' + char_alnum + ']'
self.CHAR_ALNUM = '[' + char_alnum + ']'
self.CHAR_NONALNUM_ = '[^' + char_alnum_ + ']'
self.CHAR_ALNUM_ = '[' + char_alnum_ + ']'
self.RE_EMOJI = re_compile(r'''(?xmu)
(?:
(''' + self.CHAR_ALNUM + ''') # 1
( :-?[)\]}([{\\/|!]+) # 2
(\.|\s|$) # 3
)|(?:
(^|\s) # 4
([:8Ж]-?[)\]}([{\\/|!]+) # 5
(\.|\s|$) # 6
)|(?:
(^|\s|''' + self.CHAR_ALNUM + ''') # 7
(;-?\)+ | [-=][)(]+ | -_- | ^-^) # 8
(\.|\s|$) # 9
)|(?:
([\u2660-\u27ff\U00010000-\U0010ffff]) # 10
(?!\#\S*) # skip hashtags
)|(?:
(^|[^)(:;=-]) # 11
(\)\)+ | \(\(+) # 12
)|(?:
(^[^(]*) (^|[^)(:;=-]) (\)+) # 13-15
)|(?:
(\(+) ([^)(:;=-]|$) ([^)]*$) # 16-18
)|(?:
<img\sclass='emoji\scode(\d\d\d\d)'[^>]+>
\sЯндекс\sУсловия\sиспользования\s*$ # 19
)
''')
self.TAG_EMOJI = self.register_tag('EntityEmoji')
self.TAG_EMO = 'EMO' + self.TAG_EMOJI
self.RE_EMAIL = re_compile(r'''(?ximu)
(?: mailto: )?
(
[a-z0-9!#$%&'*+/=?^_`{|}~-]+
(?:
\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+
)*|"(?:
[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]
| \\[\x01-\x09\x0b\x0c\x0e-\x7f]
)*"
)\s?@\s?(
(?:
[a-z0-9] # starts with an alphanumeric
(?:
[a-z0-9-]* # a hyphen is allowed in the middle
[a-z0-9] # ends with an alphanumeric only
)?
\. # each element is terminated by a dot
)+ # at least one such element
[a-z0-9] # the last element starts with an alphanumeric
(?:
[a-z0-9-]* # a hyphen is allowed in the middle
[a-z0-9] # ends with an alphanumeric only
)?
| \[(?:
(?: 25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]? )\.
){3}(?:
25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]? | [a-z0-9-]*[a-z0-9]:
(?:
[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]
| \\[\x01-\x09\x0b\x0c\x0e-\x7f] )+
)\]
)
(?!\S*''' + re_char_delim + ''')''')
self.TAG_EMAIL = self.register_tag('EntityEmail', mask='адрес')
self.RE_XML = re_compile(r'''(?ximu)
(?:
<
(?: # first the "<" character (tag opened); after it, either:
([a-z:]+) # letters and the ":" sign (the tag name)
(?:\s[^>]+)? # then possibly a space and anything except ">",
> # and ">" closes the tag
(?:
.* # then any characters,
</\1> # and everything ends with the closing tag "</tag name>";
)? # but this part may be missing
)| # or:
</([a-z:]+)> # a dangling closing tag
) # (i.e. the end is there, but the start got lost)
(?!\S*''' + re_char_delim + ''')''')
self.TAG_XML = self.register_tag('EntityXml')
# scheme <sss>:[//]
re_1 = r'''[^\s:]*[^a-z:+.-]''' # garbage
re_2 = r'''[a-z][0-9a-z+.-]*'''
re_uri_scheme = r'''(?:
( (?:{})? ) ( (?: {} : )+ ) ( // )?
)'''.format(re_1, re_2)
# username[:password] <sss>[:<sss>]@
re_1 = r'''[0-9a-z_.-]'''
re_2 = r'''[0-9a-z_$&~*+=.,;!()-] | [%][0-9a-f][0-9a-f]'''
re_uri_user = r'''(?:
( (?:{})+ )
(?: : ( (?:{})+ ) )?
@
)'''.format(re_1, re_2)
# host[:port] <sss[.sss][...]>[:<ddd>]
re_1 = r'''[0-9a-zёа-я] (?: [0-9a-zёа-я-]*[0-9a-zёа-я] )?'''
re_uri_host = r'''(?:
( {} (?: \. {} )* )
(?: : ( \d+ ) )*
)'''.format(re_1, re_1)
# path </sss[/sss][...]>
re_1 = r'''[0-9a-zёа-я_@$&~+=.,:!()-] | [%][0-9a-f][0-9a-f]'''
re_uri_path = r'''(
(?: / (?:{})* )+
)'''.format(re_1)
# params <;sss[=[sss]][;sss[=[sss]]][...]>
re_1 = r'''[0-9a-zёа-я_@$&~*/+.,:!()-] | [%][0-9a-f][0-9a-f]'''
re_uri_params = r'''(
(?: ;+ (?:{})+ (?: = (?:{})* )? )+
)'''.format(re_1, re_1)
# query <?sss=sss&sss=sss&sss=sss>
re_1 = r'''[0-9a-zёа-я_@$~*/+.,:;!()-] | [%][0-9a-f][0-9a-f]'''
re_uri_query = r'''(
\? (?:{})+ (?: = (?:{})* )?
(?: & (?:{})+ (?: = (?:{})* )? )*
)'''.format(re_1, re_1, re_1, re_1)
# fragment #<sss>
re_1 = r'''[0-9a-zёа-я_@$&~*/+=.,:;!()-] | [%][0-9a-f][0-9a-f]'''
re_uri_fragment = r'''(?:
\# ({})*
)'''.format(re_1)
self.RE_URI = re_compile(r'''(?ximu)
# https://www.ietf.org/rfc/rfc3986.txt
#(?:([^:/?#]+):)? # scheme
#(?://([^/?#]*))? # net_loc
#([^?#]*) # path
#(?:\?([^#]*))? # query
#(?:#(.*))? # fragment
# https://www.w3.org/Addressing/rfc1808.txt
( # uri/1
\b
{0}? # scheme/2
{1}? # username[:password]/3,4
{2}? # host[:port]/5,6
{3}? # path/7
{4}? # params/8
{5}? # query/9
{6}? # fragment/10
)
(?!\S*{7})'''.format(re_uri_scheme, re_uri_user, re_uri_host,
re_uri_path, re_uri_params, re_uri_query,
re_uri_fragment, re_char_delim))
self.TAG_URI = self.register_tag('EntityUri', mask='адрес')
self.RE_PHONE = re_compile(r'''(?ximu)
(^|\D) # 1 TODO: 20(040)420-12-46 --> 20(ENTITY_PHONE
(\+?\d)? # 2
#(\+7|7|8)? # 2
\s?(?:\(|-)?\s? (\d{3,5}) \s?(?:\)|-)?\s? # 3
(\d{1,3})\s?\-?\s? # 4
(\d\d)\s?\-?\s? # 5
(\d\d) # 6
([^-0-9''' + re_char_delim + ''']|$) # 7
(?!\S*''' + re_char_delim + ''')''')
self.TAG_PHONE = self.register_tag('EntityPhone', mask='номер')
self.RE_DATE = re_compile(
r'(?mu)\b(\d\d?)\.(\d\d?)\.(\d\d(?:\d\d)?)(\b|г)'
r'(?!\S*' + re_char_delim + ')'
)
self.TAG_DATE = self.register_tag('EntityDate', mask='сегодня')
self.RE_HASHTAG = re_compile(
# r'(?mu)(^|[\s(])(#' + self.CHAR_ALPHA + self.CHAR_ALNUM_
# r'(?mu)(.)?(#' + self.CHAR_ALNUM_
r'(?mu)(^|[\s(])?(#[' + char_alnum_
+ '\u2660-\u27ff\U00010000-\U0010ffff]'
+ r'{,138})(?!\S*' + re_char_delim + ')'
# + r'{,138})\b(?!\S*' + re_char_delim + ')'
)
self.TAG_HASHTAG = self.register_tag('EntityHashtag')
self.RE_NAMETAG = re_compile(
r'(?mu)(^|[\s(])(@[A-Za-z0-9._]'# + self.CHAR_ALPHA + self.CHAR_ALNUM_
+ r'{,138})\b(?!\S*' + re_char_delim + ')'
)
self.TAG_NAMETAG = self.register_tag('EntityNametag', mask='ссылка')
re_1 = r'\s*[A-ZЁА-Я][^{0}]+[.!?](?:\s*[A-ZЁА-Я][^{0}])*'
self.RE_QUOTATION = re_compile(r'''(?xmu)
(?:(")({0})("))| # 1 - 3
(?:(``)({1})(''))| # 4 - 6
(?:(«)({2})(»))| # 7 - 9
(?:(„)({3})(“))| # 10 - 12
(?:(“)({4})(”)) # 13 - 15
'''.format(re_1.format('"'), re_1.format("`'"), re_1.format('«»'),
re_1.format('„“'), re_1.format('“”')))
self.TAG_QUOTATION_START = self.register_tag('QuotationStart', '``')
self.TAG_QUOTATION_END = self.register_tag('QuotationEnd', "''")
self.RE_TAG = re_compile(
r'([^' + re_char_delim + r'\s]+)' + re_char_delim
+ r'([^' + re_char_delim + r'\s]+)')
self.TAG_UNK = self.register_tag('EntityUnk')
self.SHORTCUTS = []
self.TAG_SHORTCUT = self.CHAR_DELIM + self.CHAR_DELIM + 'Shortcut'
def add_shortcut(self, orig, subst):
res = ''
for subst_ in subst.split():
idx = len(self.SHORTCUTS)
res += '{}{}{}' \
.format('' if orig else ' ', idx, self.TAG_SHORTCUT)
self.SHORTCUTS.append((subst_, orig))
orig = ''
return res
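# Example (illustrative sketch): each word of the substitution gets its own numbered
# '<idx>||Shortcut' placeholder (CHAR_DELIM is '|'), and the (word, original) pairs are
# stored in self.SHORTCUTS for later resolution. The indices depend on prior calls.
#
#   tp = TextPreprocessor()
#   tp.add_shortcut('т.е.', 'то есть')
#   # -> '0||Shortcut 1||Shortcut'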
def clear_corpus(self):
self._corpus = OrderedDict()
def new_doc(self, doc_id=None, metadata=None):
"""Create an empty document.
:param doc_id: id of the document. If None then uuid will be used
:type doc_id: str
:param metadata: CoNLL-U metadata that will be returned in document
header
:type metadata: OrderedDict
:return: id of the document created
:rtype: str
"""
if doc_id is None:
doc_id = str(uuid.uuid4())
self._corpus[doc_id] = \
{'meta': OrderedDict(metadata if metadata else [])}
return doc_id
def remove_doc(self, doc_id):
"""Remove the document with a given *doc_id*"""
self._corpus.pop(doc_id, None)
def new_par(self, text, doc_id=None):
"""Add a *text* as a paragraph to the document.
:param doc_id: id of the document. If None then new document will be
created
:type doc_id: str
:return: number of a paragraph created
:rtype: int
"""
return self.new_pars([text], doc_id)[0]
def new_pars(self, pars, eop=r'\n', doc_id=None):
"""Add a list of text blocks as paragraphs to the document. Empty
blocks will be skipped.
:param pars: paragraphs to add. If pars is of str type, it will be
split first with ``text_to_pars()``
:type pars: list(str)|str
:param eop: param for ``text_to_pars()``. Ignored if *pars* not of str
type
:param doc_id: id of the document. If None then new document will be
created
:type doc_id: str
:return: lower and higher numbers of paragraphs created
:rtype: tuple(int, int)
"""
if doc_id is None:
doc_id = self.new_doc()
assert doc_id in self._corpus, \
'ERROR: document "{}" does not exist'.format(doc_id)
doc = self._corpus[doc_id]
pars_ = doc.setdefault('pars', [])
par_no1 = len(pars_)
par_no2 = par_no1 - 1
for i, text in enumerate(
self.text_to_pars(pars, eop=eop) if isinstance(pars, | |
<reponame>wayneferdon/WallpaperEngine.NeteaseMusicLyricDesktop
from pykakasi import kakasi
import datetime
import json
import re
import os
import requests
import time
import sqlite3
from enum import Enum
APPDATA = os.getenv("LOCALAPPDATA")
LOGPATH = os.path.expanduser(APPDATA + "/Netease/CloudMusic/cloudmusic.elog")
DATABASE = os.path.expanduser(APPDATA + "/Netease/CloudMusic/Library/webdb.dat")
OUTPUT = 'OutPut.html'
HEADERS = {
'user-agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64)\
AppleWebKit/537.36 (KHTML,like Gecko)\
Chrome/80.0.3987.87 Safari/537.36'
}
class PlayState(Enum):
STOPPED = 0
PLAYING = 1
EXITED = 2
class LogValidInfo(Enum):
NONE = 0
APPEXIT = 1
PLAY = 2
LOAD = 3
SETPOS = 4
RESUME = 5
PAUSE = 6
class NeteaseMusicStatus:
def __init__(self):
self.LogCount = 0
self.PlayState = PlayState.STOPPED
self.CurrentSong = False
self.CurrentSongLrc = dict()
self.CurrentSongLength = 0
self.LastUpdate = 0
self.kakasi = kakasi()
self.LastLog = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.LastResumeTime = 0
self.LastPauseTime = 0
self.LastPosition = 0
self.CurrentLrc = [
{'Lrc': '', 'Translation': ''},
{'Lrc': '', 'Translation': ''},
{'Lrc': '', 'Translation': ''}
]
self.NextLrcTime = 0
self.SongLrcKeyTime = list()
self.OutPutHtml = str()
self.LocalMusicInfo = LoadSongDataBase()
with open("./Hanzi2Kanji.json", "r") as KanjiLib:
self.Hanzi2KanjiLib = KanjiLib.readlines()
LibJson = ""
for line in self.Hanzi2KanjiLib:
LibJson += line
self.Hanzi2KanjiLib = json.loads(LibJson)
try:
self.LogFile = open(LOGPATH, 'rb')
self.FileSize = os.path.getsize(LOGPATH)
self.LogFile.seek(0, 2)
except Exception:
raise
LineList = self.GetLastLines(1000)
LineList = self.Decode(LineList[0])
if LineList is not None:
LineIndex = 0
while True:
try:
LineIndex -= 1
LineData = LineList[LineIndex]
try:
self.CallbackLog(LineData, True)
except Exception:
pass
except IndexError:
break
self.LastLog = LineList[-1]
with open(OUTPUT, 'w', encoding='utf-8') as OutPutFile:
OutPutFile.write('')
if self.CurrentSong:
CurrentTimePosition = self.LastPosition
if self.PlayState == PlayState.PLAYING:
CurrentTimePosition += time.time() - self.LastResumeTime
self.GetLrc()
self.SetCurrentLrc(CurrentTimePosition)
self.OutPutCurrentLrc()
def Decode(self,data):
a = [
[56,"0"],
[41,"1"],
[26,"2"],
[11,"3"],
[124,"4"],
[109,"5"],
[94,"6"],
[79,"7"],
[176,"8"],
[161,"9"],
[44,"a"],
[31,"b"],
[14,"c"],
[121,"d"],
[104,"e"],
[91,"f"],
[74,"g"],
[181,"h"],
[164,"i"],
[151,"j"],
[12,"C"],
[134,"k"],
[241,"l"],
[224,"m"],
[211,"n"],
[194,"o"],
[60,"p"],
# [0,"q"],
[30,"r"],
[15,"s"],
[120,"t"],
[105,"u"],
# [0,"v"],
[75,"w"],
# [0,"x"],
[165,"y"],
[150,"z"],
# [0,"A"],
[167,"B"],
[107,"U"],
[123,"D"],
[69,"E"],
[89,"F"],
[72,"G"],
# [0,"H"],
[166,"I"],
# [0,"J"],
# [0,"K"],
[110,"L"],
# [0,"M"],
[209,"N"],
[192,"O"],
# [0,"P"],
# [0,"Q"],
[28,"R"],
[13,"S"],
[122,"T"],
# [0,"U"],
# [0,"V"],
[46,"A"],
[73,"_w"],
# [0,"X"],
[148,"Y"],
# [0,"Z"],
[193,"_"],
[177,"("],
[133,"["],
[227,"]"],
[57,""],
[25,"[STX]"],
[146,":"],
[198,"/"],
[228,"-"],
[130,"+"],
[125,"$"],
[27,"\""],
[162,"\t"],
[199,"?"],
[245,","],
[240,"|"],
[215,"."],
[145,"\n"],
[40,"!"],
[243,"L"],
[160,")"],
[226,"M"],
[88,"V"],
[90,"v"],
[183,"H"],
[62,"P"],
[45,"q"],
[135,"{"],
[106,"E"],
[29,"E"],
[242,""],
[229,"="],
[225,"}"],
[108,"G"],
[131," "],
[180,"x"],
]
lista = list()
string = ""
for eachData in data:
found = False
for each in a:
if(each[0] == eachData):
found = True
string += each[1]
break
if not found:
if(eachData not in lista):
lista.append(eachData)
string += "【" + str(eachData) +"】"
continue
result = list()
for each in string.split("\n"):
if each != "":
result.append(each)
result.reverse()
return result
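# Example (illustrative; `status` is assumed to be an existing NeteaseMusicStatus
# instance): Decode maps each raw log byte to a character via the table above, splits
# the decoded text on newlines and returns the lines newest-first.
#
#   status.Decode([41, 145, 26, 145, 11])              # decodes to "1\n2\n3"
#   # -> ['3', '2', '1']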
def GetLastLines(self, Length):
try:
FileSize = os.path.getsize(LOGPATH)
if FileSize == 0:
return None
else:
# to use seek from end, must use mode 'rb'
with open(LOGPATH, 'rb') as TargetFile:
Offset = -Length # initialize offset
while -Offset < FileSize: # offset cannot exceed file size
# read offset chars from eof(represent by number'2')
TargetFile.seek(Offset, 2)
Lines = TargetFile.readlines() # read from fp to eof
if len(Lines) >= 2: # if contains at least 2 lines
return Lines # then last line is totally included
else:
Offset *= 2 # enlarge offset
TargetFile.seek(0)
return TargetFile.readlines()
except FileNotFoundError:
return None
def GetSongNameAndArtists(self):
Result = dict()
if str(self.CurrentSong) in self.LocalMusicInfo.keys():
try:
JsonDate = json.loads(
self.LocalMusicInfo[str(self.CurrentSong)])
SongName = JsonDate["album"]["name"]
Artists = JsonDate['artists']
SongArtist = 'by: '
for Artist in Artists:
if SongArtist != 'by: ':
SongArtist += ' / '
SongArtist += Artist['name']
Result = {
0: {'Lrc': '无歌词', 'Translation': ''},
1: {'Lrc': SongName, 'Translation': ''},
float("inf"): {'Lrc': SongArtist, 'Translation': ''}
}
except KeyError:
pass
if not Result:
Url = 'https://music.163.com/api/song/detail/' \
'?id=' + str(self.CurrentSong) + \
'&ids=[' + str(self.CurrentSong) + ']'
JsonDate = json.loads(requests.get(Url, headers=HEADERS).text)
JsonDate = JsonDate['songs'][0]
SongName = JsonDate['name']
Artists = JsonDate['artists']
SongArtist = 'by: '
for Artist in Artists:
if SongArtist != 'by: ':
SongArtist += ' / '
SongArtist += Artist['name']
if SongArtist != 'by: ':
Result = {
0: {'Lrc': '无歌词', 'Translation': ''},
1: {'Lrc': SongName, 'Translation': ''},
float("inf"): {'Lrc': SongArtist, 'Translation': ''}
}
else:
Result[0] = {'Lrc': '无歌词', 'Translation': ''}
return Result
def ReloadMonitorPath(self):
try:
self.LogFile.close()
except Exception:
pass
try:
self.LogFile.close()
self.LogFile = open(LOGPATH, "rb")
self.FileSize = os.path.getsize(LOGPATH)
self.LogFile.seek(0, 2)
return True
except Exception:
return False
def CallbackLog(self, Content, Initializing=False):
ValidInfo = LogValidInfo.NONE
LogTime = 0
if 'Appexit.' in Content:
if self.PlayState == PlayState.PLAYING:
self.LastPosition += time.time() - self.LastResumeTime
self.PlayState = PlayState.EXITED
LogTime = time.time()
ValidInfo = LogValidInfo.APPEXIT
print(time.time(),"\t", "Appexit")
elif "[info]" in Content:
Content = Content.strip().strip('\n')
Result = re.split('\\[info]', Content)
LogInfo = Result[1]
LogTime = re.split('\\[(.*?)]', Result[0])
LogTime = time.mktime(datetime.datetime.fromisoformat(LogTime[5]).timetuple())
if 'player._$play' in LogInfo:
self.CurrentSong = re.split('_', re.split('"', LogInfo)[1])[0]
if not Initializing:
self.GetLrc()
if self.PlayState != PlayState.EXITED:
self.LastPosition = 0
self.NextLrcTime = 0
# require load and resume
self.PlayState = PlayState.STOPPED
ValidInfo = LogValidInfo.PLAY
print(time.time(),"\t", "current song:",self.CurrentSong)
elif '???__onAudioPlayerLoad' in LogInfo:
self.CurrentSongLength = json.loads(
re.split('\t', LogInfo)[0])['duration']
ValidInfo = LogValidInfo.LOAD
print(time.time(),"\t", "duration:",self.CurrentSongLength)
elif '???_$setPosition' in LogInfo:
self.LastPosition = json.loads(LogInfo.split('\t')[0])['ratio'] * self.CurrentSongLength
ValidInfo = LogValidInfo.SETPOS
if self.PlayState == PlayState.PLAYING:
self.LastResumeTime = LogTime
print(time.time(),"\t", "Last Position:",self.LastPosition)
elif 'player._$resumedo' in LogInfo:
self.PlayState = PlayState.PLAYING
self.LastResumeTime = LogTime
ValidInfo = LogValidInfo.RESUME
print(time.time(),"\t", "resume")
elif 'player._$pausedo' in LogInfo:
ValidInfo = LogValidInfo.PAUSE
if self.PlayState == PlayState.PLAYING:
self.LastPosition += LogTime - self.LastResumeTime
self.LastPauseTime = LogTime
self.PlayState = PlayState.STOPPED
print(time.time(),"\t", "pause")
if ValidInfo == LogValidInfo.NONE:
return False
if Initializing:
if (
self.CurrentSong
and self.CurrentSongLength
and self.LastPosition
):
return True
self.LastUpdate = LogTime
return False
if ValidInfo in [LogValidInfo.SETPOS, LogValidInfo.RESUME]:
self.SetCurrentLrc(self.LastPosition)
self.OutPutCurrentLrc()
if ValidInfo == LogValidInfo.APPEXIT:
with open(OUTPUT, 'w', encoding='utf-8') as OutPutFile:
OutPutFile.write('')
return True
def Start(self,interval = 0.001):
lastModified=0
while True:
modified = os.path.getmtime(LOGPATH)
if lastModified < modified:
lastModified = modified
CurrentFileSize = os.path.getsize(LOGPATH)
if CurrentFileSize < self.FileSize:
TryCount = 0
while TryCount < 10:
if not self.ReloadMonitorPath():
TryCount += 1
else:
TryCount = 0
self.FileSize = os.path.getsize(LOGPATH)
break
time.sleep(0.1)
if TryCount == 10:
raise Exception("Open %s failed after try 10 times"
% LOGPATH)
else:
self.FileSize = CurrentFileSize
LineList = self.GetLastLines(1)
LineList = self.Decode(LineList[-1])
NewLines = list()
lastLogUpdated = False
lastLog = self.LastLog
for i in range(len(LineList)):
if LineList[i] == lastLog:
break
if not lastLogUpdated:
self.LastLog = LineList[i]
lastLogUpdated = True
NewLines.append(LineList[i])
for i in range(len(NewLines)):
line = NewLines[-i]
self.CallbackLog(line)
if self.PlayState == PlayState.PLAYING:
self.SetCurrentLrc()
self.OutPutCurrentLrc()
time.sleep(interval)
def OutPutCurrentLrc(self):
NewOutPut = GetOutPut(self.CurrentLrc)
if NewOutPut == self.OutPutHtml:
return
with open(OUTPUT, 'w', encoding='utf-8') as OutPutFile:
OutPutFile.write(NewOutPut)
self.OutPutHtml = NewOutPut
@staticmethod
def GetSplitTimeLrc(LrcList):
NewList = dict()
if LrcList:
LrcList = re.split('\n', LrcList)
for LrcItem in LrcList:
LrcItem = re.split('\\[(.*?)]', LrcItem)
try:
LrcTime = LrcItem[1]
if 'by' in LrcTime:
continue
LrcItem = LrcItem[2]
if LrcItem == '':
continue
LrcTime = re.split('\\:', LrcTime.replace(".", ":"))
Minute = int(LrcTime[0])
Second = int(LrcTime[1])
try:
Millisecond = int(LrcTime[2])
except IndexError:
Millisecond = 0
LrcTime = Minute * 60000 + Second * 1000 + Millisecond
NewList[LrcTime] = LrcItem
except Exception:
pass
return NewList
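# Example (illustrative): parsing a small LRC fragment into a {time_in_ms: text} dict.
# Note the code treats the fractional part as milliseconds, so "12.50" becomes 12050.
#
#   NeteaseMusicStatus.GetSplitTimeLrc("[00:12.50]line one\n[00:15.00]line two")
#   # -> {12050: 'line one', 15000: 'line two'}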
def GetHiraganaLrc(self, Lrc):
LrcSplit = list()
for Split in Lrc:
for each in self.kakasi.convert(Split):
for Item in SplitAll(each['orig'], "((.*?)){1}"):
LrcSplit += self.kakasi.convert(Item)
LrcConverted = ""
LrcRomajinn = ""
PriorHira = ""
IsPreJP = True
for Split in LrcSplit:
orig = Split['orig']
hira = Split['hira']
roma = Split['hepburn']
if not IsPreJP:
orig = orig.replace(" ", " ")
if IsOnlyEnglishOrPunctuation(orig):
LrcConverted += orig + " "
LrcRomajinn += orig + " "
PriorHira = ""
IsPreJP = False
continue
IsPreJP = True
if hira == "":
KanjiLrc = ""
for EachStr in orig:
if EachStr in self.Hanzi2KanjiLib.keys():
KanjiLrc += self.Hanzi2KanjiLib[EachStr][0]
else:
KanjiLrc += EachStr
orig = KanjiLrc
hira = ""
roma = ""
for newEach in kakasi().convert(orig):
hira += newEach['hira']
roma += newEach['hepburn']
if hira == orig:
if hira == PriorHira:
orig = ""
roma = ""
PriorHira = ""
else:
hiraLen = len(hira)
origLen = len(orig)
isDuplicated = False
for i in range(min(hiraLen,origLen)):
if hira[-i-1] == orig[-i-1]:
isDuplicated = True
continue
if isDuplicated:
orig = orig[0:-i] + "(" + hira[0:-i] + ")" + hira[-i:-1] + hira[-1]
PriorHira = ""
break
if not isDuplicated:
PriorHira = "(" + | |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Module for skeleton binary 3D data analysis
"""
# import sys
# import os.path
# path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
import logging
logger = logging.getLogger(__name__)
import traceback
import numpy as np
import scipy.ndimage
import scipy.interpolate
from io import open
class SkeletonAnalyser:
"""
| Example:
| skan = SkeletonAnalyser(data3d_skel, volume_data, voxelsize_mm)
| stats = skan.skeleton_analysis()
| data3d_skel: 3d array with skeleton as 1s and background as 0s
| use_filter_small: remove small objects before analysis
| filter_small_threshold: threshold for small filtering
:arg cut_wrong_skeleton: remove short skeleton edges to terminal
:arg aggregate_near_nodes_distance: combine near nodes to one. Parameter
defines distance in mm.
"""
def __init__(
self,
data3d_skel,
volume_data=None,
voxelsize_mm=[1, 1, 1],
use_filter_small=False,
filter_small_threshold=3,
cut_wrong_skeleton=True,
aggregate_near_nodes_distance=0,
):
# for not
self.volume_data = volume_data
self.voxelsize_mm = voxelsize_mm
self.aggregate_near_nodes_distance = aggregate_near_nodes_distance
# get array with 1 for edge, 2 is node and 3 is terminal
logger.debug("Generating sklabel...")
if use_filter_small:
data3d_skel = self.filter_small_objects(data3d_skel, filter_small_threshold)
self.data3d_skel = data3d_skel
# generate nodes and edges (sklabel)
logger.debug("__skeleton_nodes, __generate_sklabel")
skelet_nodes = self.__skeleton_nodes(data3d_skel)
self.sklabel = self.__generate_sklabel(skelet_nodes)
self.cut_wrong_skeleton = cut_wrong_skeleton
self.curve_order = 2
self.spline_smoothing = None
logger.debug(
"Inited SkeletonAnalyser - voxelsize:"
+ str(voxelsize_mm)
+ " volumedata:"
+ str(volume_data is not None)
)
logger.debug("aggreg %s", self.aggregate_near_nodes_distance)
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
self.shifted_zero = None
self.shifted_sklabel = None
self.stats = None
self.branch_label = None
def to_yaml(self, filename):
if self.stats is None:
logger.error("Run .skeleton_analysis() before .to_yaml()")
return
from ruamel.yaml import YAML
yaml = YAML(typ="unsafe")
with open(filename, "wt", encoding="utf-8") as f:
yaml.dump(self.stats, f)
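# Usage sketch (illustrative; the skeleton volume is synthetic and the internal helper
# methods not shown here must be available for this to run):
#
#   import numpy as np
#   skel = np.zeros((3, 10, 10), dtype=np.uint8)
#   skel[1, 5, 1:9] = 1                                # one straight branch
#   skan = SkeletonAnalyser(skel, voxelsize_mm=[1, 1, 1], cut_wrong_skeleton=False)
#   stats = skan.skeleton_analysis()
#   skan.to_yaml('skeleton_stats.yaml')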
def skeleton_analysis(self, guiUpdateFunction=None):
"""
| Glossary:
| element: line structure of skeleton connected to node on both ends. (index>0)
| node: connection point of elements. It is one or few voxelsize_mm. (index<0)
| terminal: terminal node
"""
def updateFunction(num, length, part):
if (
int(length / 100.0) == 0
or (num % int(length / 100.0) == 0)
or num == length
):
if guiUpdateFunction is not None:
guiUpdateFunction(num, length, part)
logger.info(
"skeleton_analysis: processed "
+ str(num)
+ "/"
+ str(length)
+ ", part "
+ str(part)
)
if self.cut_wrong_skeleton:
updateFunction(0, 1, "cuting wrong skeleton")
self.__cut_short_skeleton_terminal_edges()
stats = {}
len_edg = np.max(self.sklabel)
len_node = np.min(self.sklabel)
logger.debug("len_edg: " + str(len_edg) + " len_node: " + str(len_node))
# init radius analysis
logger.debug("__radius_analysis_init")
if self.volume_data is not None:
skdst = self.__radius_analysis_init()
# get edges and nodes that are near the edge. (+bounding box)
logger.debug("skeleton_analysis: starting element_neighbors processing")
self.elm_neigh = {}
self.elm_box = {}
for edg_number in list(range(len_node, 0)) + list(range(1, len_edg + 1)):
self.elm_neigh[edg_number], self.elm_box[
edg_number
] = self.__element_neighbors(edg_number)
# update gui progress
updateFunction(
edg_number + abs(len_node) + 1,
abs(len_node) + len_edg + 1,
"generating node->connected_edges lookup table",
)
logger.debug("skeleton_analysis: finished element_neighbors processing")
# clear unneeded data. IMPORTANT!!
self.__clean_shifted()
# get main stats
logger.debug(
"skeleton_analysis: starting processing part: length, radius, "
+ "curve and connections of edge"
)
# TODO switch A and B based on neighborhood maximal radius
for edg_number in list(range(1, len_edg + 1)):
try:
edgst = {}
edgst.update(self.__connection_analysis(edg_number))
if "nodeB_ZYX_mm" in edgst and "nodeA_ZYX_mm":
edgst = self.__ordered_points_with_pixel_length(edg_number, edgst)
edgst = self.__edge_curve(edg_number, edgst)
edgst.update(self.__edge_length(edg_number, edgst))
edgst.update(self.__edge_vectors(edg_number, edgst))
else:
logger.warning(
"No B point for edge ID {}. No length computation.".format(
edg_number
)
)
# edgst = edge_analysis(sklabel, i)
if self.volume_data is not None:
edgst["radius_mm"] = float(
self.__radius_analysis(edg_number, skdst)
) # slow (this takes most of time)
stats[edgst["id"]] = edgst
# update gui progress
updateFunction(
edg_number, len_edg, "length, radius, curve, connections of edge"
)
except Exception as e:
logger.warning(
"Problem in connection analysis\n" + traceback.format_exc()
)
logger.debug(
"skeleton_analysis: finished processing part: length, radius, "
+ "curve, connections of edge"
)
# @TODO finish this
logger.debug(
"skeleton_analysis: starting processing part: angles of connected edges"
)
for edg_number in list(range(1, len_edg + 1)):
try:
if "nodeB_ZYX_mm" in edgst and "nodeA_ZYX_mm" in edgst:
edgst = stats[edg_number]
edgst.update(self.__connected_edge_angle(edg_number, stats))
updateFunction(edg_number, len_edg, "angles of connected edges")
except Exception as e:
logger.warning("Problem in angle analysis\n" + traceback.format_exc())
self.stats = stats
logger.debug(
"skeleton_analysis: finished processing part: angles of connected edges"
)
return stats
def __remove_edge_from_stats(self, stats, edge):
logger.debug("Cutting edge id:" + str(edge) + " from stats")
edg_stats = stats[edge]
connected_edgs = edg_stats["connectedEdgesA"] + edg_stats["connectedEdgesB"]
for connected in connected_edgs:
try:
stats[connected]["connectedEdgesA"].remove(edge)
except:
pass
try:
stats[connected]["connectedEdgesB"].remove(edge)
except:
pass
del stats[edge]
return stats
def __clean_shifted(self):
del self.shifted_zero # needed by __element_neighbors
self.shifted_zero = None
del self.shifted_sklabel # needed by __element_neighbors
self.shifted_sklabel = None
# possibly a fix for short-lived issues, but the real problem was elsewhere
# try:
# del(self.shifted_zero) # needed by __element_neighbors
# except:
# logger.warning('self.shifted_zero does not exist')
# try:
# del(self.shifted_sklabel) # needed by __element_neighbors
# except:
# logger.warning('self.shifted_sklabel does not exist')
def __cut_short_skeleton_terminal_edges(self, cut_ratio=2.0):
"""
cut_ratio = 2.0 -> if the radius of a terminal edge is 2x its length or more,
remove it
"""
def remove_elm(elm_id, elm_neigh, elm_box, sklabel):
sklabel[sklabel == elm_id] = 0
del elm_neigh[elm_id]
del elm_box[elm_id]
for elm in elm_neigh:
elm_neigh[elm] = [x for x in elm_neigh[elm] if x != elm]
return elm_neigh, elm_box, sklabel
len_edg = np.max(self.sklabel)
len_node = np.min(self.sklabel)
logger.debug("len_edg: " + str(len_edg) + " len_node: " + str(len_node))
# get edges and nodes that are near the edge. (+bounding box)
logger.debug("skeleton_analysis: starting element_neighbors processing")
self.elm_neigh = {}
self.elm_box = {}
for edg_number in list(range(len_node, 0)) + list(range(1, len_edg + 1)):
self.elm_neigh[edg_number], self.elm_box[
edg_number
] = self.__element_neighbors(edg_number)
logger.debug("skeleton_analysis: finished element_neighbors processing")
# clear unneeded data. IMPORTANT!!
self.__clean_shifted()
# remove edges+nodes that are not connected to rest of the skeleton
logger.debug(
"skeleton_analysis: Cut - Removing edges that are not"
+ " connected to rest of the skeleton (not counting its nodes)"
)
cut_elm_neigh = dict(self.elm_neigh)
cut_elm_box = dict(self.elm_box)
for elm in self.elm_neigh:
elm = int(elm)
if elm > 0: # if edge
conn_nodes = [i for i in self.elm_neigh[elm] if i < 0]
conn_edges = []
for n in conn_nodes:
try:
nn = self.elm_neigh[n] # get neighbours elements of node
except:
logger.debug(
"Node " + str(n) + " not found! May be already deleted."
)
continue
for e in nn: # if there are other edges connected to the node, add them to conn_edges
if e > 0 and e not in conn_edges and e != elm:
conn_edges.append(e)
if (
len(conn_edges) == 0
): # if no other edges are connected to nodes, remove from skeleton
logger.debug(
"removing edge "
+ str(elm)
+ " with its nodes "
+ str(self.elm_neigh[elm])
)
for neigh in self.elm_neigh[elm]:
remove_elm(neigh, cut_elm_neigh, cut_elm_box, self.sklabel)
self.elm_neigh = cut_elm_neigh
self.elm_box = cut_elm_box
# remove elements that are not connected to the rest of skeleton
logger.debug(
"skeleton_analysis: Cut - Removing elements that are not connected"
+ " to rest of the skeleton"
)
cut_elm_neigh = dict(self.elm_neigh)
cut_elm_box = dict(self.elm_box)
for elm in self.elm_neigh:
elm = int(elm)
if len(self.elm_neigh[elm]) == 0:
logger.debug("removing element " + str(elm))
remove_elm(elm, cut_elm_neigh, cut_elm_box, self.sklabel)
self.elm_neigh = cut_elm_neigh
self.elm_box = cut_elm_box
# get list of terminal nodes
logger.debug("skeleton_analysis: Cut - get list of terminal nodes")
terminal_nodes = []
for elm in self.elm_neigh:
if elm < 0: # if node
conn_edges = [i for i in self.elm_neigh[elm] if i > 0]
if len(conn_edges) == 1: # if only one edge is connected
terminal_nodes.append(elm)
# init radius analysis
logger.debug("__radius_analysis_init")
if self.volume_data is not None:
skdst = self.__radius_analysis_init()
# removes end terminal edges based on radius/length ratio
logger.debug(
"skeleton_analysis: Cut - Removing bad terminal edges based on"
+ " radius/length ratio"
)
cut_elm_neigh = dict(self.elm_neigh)
cut_elm_box = dict(self.elm_box)
for tn in terminal_nodes:
te = [i for i in self.elm_neigh[tn] if i > 0][0] # terminal edge
radius = float(self.__radius_analysis(te, skdst))
edgst = self.__connection_analysis(int(te))
edgst = self.__ordered_points_with_pixel_length(te, edg_stats=edgst)
edgst.update(self.__edge_length(te, edgst))
length = edgst["lengthEstimation"]
# logger.debug(str(radius / float(length))+" "+str(radius)+" "+str(length))
if (radius / float(length)) > cut_ratio:
logger.debug("removing edge " + str(te) + " with its terminal node.")
remove_elm(te, cut_elm_neigh, cut_elm_box, self.sklabel)
remove_elm(tn, cut_elm_neigh, cut_elm_box, self.sklabel)
self.elm_neigh = cut_elm_neigh
self.elm_box = cut_elm_box
# check if some nodes are not forks but just curves
logger.debug(
"skeleton_analysis: Cut - check if some nodes are not forks but just curves"
)
for elm | |
<reponame>ContactEngineering/SurfaceTopography<gh_stars>1-10
#
# Copyright 2020-2021 <NAME>
# 2020-2021 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
from scipy.signal import get_window
from ..FFTTricks import get_window_2D
from ..HeightContainer import UniformTopographyInterface
from ..UniformLineScanAndTopography import DecoratedUniformTopography
class WindowedUniformTopography(DecoratedUniformTopography):
"""
Construct a topography with a window function applied to it.
"""
name = 'windowed_topography'
def __init__(self, topography, window=None, direction=None, info={}):
"""
window : str, optional
Window for eliminating edge effect. See scipy.signal.get_window.
(Default: no window for periodic Topographies, "hann" window for
nonperiodic Topographies)
direction : str, optional
Direction in which the window is applied. Possible options are
'x', 'y' and 'radial'. If set to None, it chooses 'x' for line
scans and 'radial' for topographies. (Default: None)
"""
super().__init__(topography, info=info)
self._window_name = window
self._direction = direction
self._window_data = None
def _make_window(self):
self._window_data = None
n = self.parent_topography.nb_grid_pts
try:
nx, ny = n
except ValueError:
nx, = n
window_name = self._window_name
if not self.parent_topography.is_periodic and window_name is None:
window_name = "hann"
direction = self._direction
if direction is None:
direction = 'x' if self.parent_topography.dim == 1 else 'radial'
# Construct window
if window_name is not None and window_name != 'None':
if direction == 'x':
# Get window from scipy.signal
win = get_window(window_name, nx)
# Normalize window
win *= np.sqrt(nx / (win ** 2).sum())
elif direction == 'y':
if self.parent_topography.dim == 1:
raise ValueError("Direction 'y' does not make sense for line scans.")
# Get window from scipy.signal
win = get_window(window_name, ny)
# Normalize window
win *= np.sqrt(ny / (win ** 2).sum())
elif direction == 'radial':
if self.parent_topography.dim == 1:
raise ValueError("Direction 'radial' does not make sense for line scans.")
win = get_window_2D(window_name, nx, ny,
self.parent_topography.physical_sizes)
# Normalize window
win *= np.sqrt(nx * ny / (win ** 2).sum())
else:
raise ValueError(f"Unknown direction '{self._direction}'.")
self._window_data = win
def __getstate__(self):
""" is called and the returned object is pickled as the contents for
the instance
"""
state = super().__getstate__(), \
self._window_name, self._direction
return state
def __setstate__(self, state):
""" Upon unpickling, it is called with the unpickled state
Keyword Arguments:
state -- result of __getstate__
"""
superstate, self._window_name, self._direction = state
super().__setstate__(superstate)
@property
def window_data(self):
if self._window_data is None:
self._make_window()
return self._window_data
def heights(self):
""" Computes the windowed topography.
"""
if self.window_data is None:
return self.parent_topography.heights()
else:
direction = self._direction
if direction is None:
direction = 'x' if self.parent_topography.dim == 1 else 'radial'
if direction == 'x':
return (self.window_data * self.parent_topography.heights().T).T
elif direction == 'y' or direction == 'radial':
return self.window_data * self.parent_topography.heights()
else:
raise ValueError(f"Unknown direction '{self._direction}'.")
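# Usage sketch (illustrative, not from the package docs; `topo` is assumed to be an
# existing nonperiodic Topography): apply a radial Hann window before spectral analysis.
#
#   windowed = WindowedUniformTopography(topo, window='hann', direction='radial')
#   h_w = windowed.heights()                           # heights times the normalized window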
class FourierFilteredUniformTopography(DecoratedUniformTopography):
name = 'filtered_topography'
def __init__(self, topography,
filter_function=lambda qx, qy: (np.abs(qx) <= 1) * (np.abs(qy) <= 1),
isotropic=True,
info={}):
if not topography.is_periodic:
raise ValueError("only implemented for periodic topographies")
super().__init__(topography, info=info)
self._filter_function = filter_function
self._is_filter_isotropic = isotropic
# TODO: should be deducible from the filter function signature
def __getstate__(self):
""" is called and the returned object is pickled as the contents for
the instance
"""
state = super().__getstate__(), \
self._filter_function, self._is_filter_isotropic
return state
def __setstate__(self, state):
""" Upon unpickling, it is called with the unpickled state
Keyword Arguments:
state -- result of __getstate__
"""
superstate, self._filter_function, self._is_filter_isotropic = state
super().__setstate__(superstate)
@property
def is_filter_isotropic(self):
return self._is_filter_isotropic
def filter_function(self, *args):
"""
Parameters
----------
if dim = 2 and filter is not isotropic
qx, qy
if dim = 1
q
"""
if self.dim == 2 and not self.is_filter_isotropic \
and len(args) != 2:
raise ("ValueError: qx, qy expected")
elif self.dim == 1 and len(args) != 1:
raise ("ValueError: q expected")
return self._filter_function(*args)
def heights(self):
if self.dim == 2:
nx, ny = self.parent_topography.nb_grid_pts
sx, sy = self.parent_topography.physical_sizes
qx = np.arange(0, nx, dtype=np.float64).reshape(-1, 1)
qx = np.where(qx <= nx // 2, qx / sx, (qx - nx) / sx)
qx *= 2 * np.pi
qy = np.arange(0, ny // 2 + 1, dtype=np.float64).reshape(1, -1)
qy *= 2 * np.pi / sy
if self.is_filter_isotropic:
h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *
self.filter_function(np.sqrt(qx ** 2 + qy ** 2)))
else:
h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *
self.filter_function(qx, qy))
return h_qs
elif self.dim == 1:
s, = self.parent_topography.physical_sizes
n, = self.parent_topography.nb_grid_pts
q = abs(2 * np.pi * np.fft.rfftfreq(n, s / n))
h = self.parent_topography.heights()
h_q = np.fft.rfft(h)
h_q_filtered = np.fft.irfft(h_q * self.filter_function(q))
# Max_imaginary = np.max(np.imag(shifted_pot))
# assert Max_imaginary < 1e-14 *np.max(np.real(shifted_pot)) ,
# f"{Max_imaginary}"
return np.real(h_q_filtered)
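# Usage sketch (illustrative; `topo` and the 0.1 cutoff are assumptions): an isotropic
# low-pass filter built from a custom filter_function on a periodic topography.
#
#   lowpass = FourierFilteredUniformTopography(
#       topo, filter_function=lambda q: q < 2 * np.pi / 0.1, isotropic=True)
#   smoothed = lowpass.heights()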
class ShortCutTopography(FourierFilteredUniformTopography):
name = 'shortcut_filtered_topography'
def __init__(self, topography,
cutoff_wavevector=None, cutoff_wavelength=None,
kind="circular step",
info={}):
r"""Applies a short wavelength cut filter to the topography using fft.
for `kind=="circular step"` (default), parts of the spectrum with
`|q| > cutoff_wavevector` are set to zero
for `kind=="square step"`, parts of the spectrum with
`q_x > cutoff_wavevector or q_y > cutoff_wavevector ` are set to zero
either `cutoff_wavelength` or
`cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`
have to be provided.
Parameters
----------
topography: Topography
cutoff_wavevector: float
highest wavevector
cutoff_wavelength: float
shortest wavelength
kind: {"circular step", "square step"}
Returns
-------
Topography with filtered heights
Examples
--------
>>> topography.shortcut(cutoff_wavevector=2 * np.pi / l)
>>> topography.shortcut(cutoff_wavelength=l) # equivalent
"""
if not topography.is_periodic:
raise ValueError("only implemented for periodic topographies")
if cutoff_wavelength is None:
if cutoff_wavevector is not None:
cutoff_wavelength = 2 * np.pi / cutoff_wavevector
else:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
elif cutoff_wavevector is not None:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
self._cutoff_wavelength = cutoff_wavelength
self._kind = kind
def circular_step(q):
return q <= self.cutoff_wavevector
def square_step(qx, qy):
return (np.abs(qx) <= self.cutoff_wavevector) * (
np.abs(qy) <= self.cutoff_wavevector)
if self._kind == "circular step":
super().__init__(topography, info=info,
filter_function=circular_step)
elif self._kind == "square step":
super().__init__(topography, info=info,
filter_function=square_step, isotropic=False)
else:
raise ValueError("Invalid kind")
@property
def cutoff_wavevector(self):
return 2 * np.pi / self._cutoff_wavelength
@property
def cutoff_wavelength(self):
return self._cutoff_wavelength
def __getstate__(self):
""" is called and the returned object is pickled as the contents for
the instance
"""
state = super().__getstate__(), self._filter_function, \
self._kind, self._cutoff_wavelength
return state
def __setstate__(self, state):
""" Upon unpickling, it is called with the unpickled state
Keyword Arguments:
state -- result of __getstate__
"""
superstate, self._filter_function, self._kind, \
self._cutoff_wavelength = state
super().__setstate__(superstate)
class LongCutTopography(FourierFilteredUniformTopography):
name = 'longcut_filtered_topography'
def __init__(self, topography,
cutoff_wavevector=None, cutoff_wavelength=None,
kind="circular step",
info={}):
r"""Applies a long wavelength cut filter to the topography using fft.
for `kind=="circular step"` (default), parts of the spectrum with
`|q| < cutoff_wavevector` are set to zero
for `kind=="square step"`, parts of the spectrum with
`q_x < cutoff_wavevector or q_y < cutoff_wavevector ` are set to zero
either `cutoff_wavelength` or
`cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`
have to be provided.
Parameters
----------
topography: Topography
cutoff_wavevector: float
highest wavevector
cutoff_wavelength: float
shortest wavelength
kind: {"circular step", "square step"}
Returns
-------
Topography with filtered heights
Examples
--------
>>> topography.longcut(cutoff_wavevector=2 * np.pi / l)
>>> topography.longcut(cutoff_wavelength=l) # equivalent
"""
if not topography.is_periodic:
raise ValueError("only implemented for periodic topographies")
if cutoff_wavelength is None:
if cutoff_wavevector is not None:
cutoff_wavelength = 2 * np.pi / cutoff_wavevector
else:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
elif cutoff_wavevector is not None:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
self._cutoff_wavelength = cutoff_wavelength
self._kind = kind
def circular_step(q):
return q >= self.cutoff_wavevector
        def square_step(qx, qy):
            return (np.abs(qx) >= self.cutoff_wavevector) * (
                np.abs(qy) >= self.cutoff_wavevector)
"""
A jGrid is a georeferenced nD array (n >= 3) chunked into fractions along its
x/y axes AND the time axis (and NOT chunked over the other axes).
It is well suited to processing pixels as timeseries.
It can be viewed as a nD array (e.g. [width, height, ndates]).
A jGrid consists of a header (a .jghdr3 file, which is just a JSON file) and
of many fractions (stored in a jdata directory alongside the header) which have
a fixed size and are numbered in row-major order according to their position
on the 2D grid.
For the time axis chunks, they are simply numbered. For example, fraction 4390
will have filenames 4390.0, 4390.1, 4390.2, each containing the same xy area
but subsequent slices of the time axis.
Fractions are named '<frac_num>.<frac_time_chunk>.jdata' where frac_num is the
flattened index in the xy grid and frac_time_chunk is the time chunk.
The (frac_num, frac_time_chunk) tuple is the fraction id.
A jGrid is sparse: if a fraction file does not exist, this means the jGrid
has no data for said fraction.
A jGrid has a dtype (like a numpy array) and each fraction is basically the
binary representation of the nD numpy array.
A jGrid also has an associated osr.SpatialReference() which allows mapping
and reprojecting the jGrid. This is stored as WKT in the header.
The jGrid can be stored either on a traditional filesystem or on HDFS where
the fraction size can be set such that a fraction fits in one block, which
leads to good performance for algorithms that can work on a per-fraction basis
"""
from __future__ import division
import os
import sys
import re
import numpy as np
import json
import copy
import cStringIO as StringIO
import rastercube.utils as utils
import pyprind
from osgeo import osr
import rastercube.io as rasterio
def read_frac(fname, hdfs_client=None):
"""
This returns data or None if the fraction is empty
"""
if not rasterio.fs_exists(fname, hdfs_client):
return None
else:
if fname.startswith('hdfs://'):
blob = rasterio.fs_read(fname, hdfs_client)
return np.load(StringIO.StringIO(blob))
else:
# If reading from fs://, we short-circuit fs_read
return np.load(rasterio.strip_uri_proto(fname, 'fs://'))
def write_frac(fname, data, hdfs_client=None):
if fname.startswith('hdfs://'):
buf = StringIO.StringIO()
np.save(buf, data)
rasterio.fs_write(fname, buf.getvalue(), hdfs_client)
else:
        # If writing to fs://, strip the protocol and write to the local filesystem
fname = rasterio.strip_uri_proto(fname, 'fs://')
outdir = os.path.dirname(fname)
utils.mkdir_p(outdir)
        # If writing to fs://, we short-circuit fs_write
with open(fname, 'wb') as f:
np.save(f, data)
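# Usage sketch (illustration only): round-trip a fraction through the local
# filesystem with the helpers above. The path and array contents are
# hypothetical; on HDFS an 'hdfs://' URI and an hdfs_client would be used
# instead.
#
#     frac = np.zeros((50, 50, 3), dtype=np.int16)
#     write_frac('fs:///tmp/worldgrid/ndvi/jdata/4390.0.jdata', frac)
#     assert np.array_equal(
#         read_frac('fs:///tmp/worldgrid/ndvi/jdata/4390.0.jdata'), frac)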
# *? and ?? turn on lazy mode (so it first tries to match the frac_num)
FRAC_ID_FROM_FNAME_RE = re.compile(r'^.*?/??(?P<frac_num>\d+)\.(?P<frac_time_chunk>\d+)\.jdata$')
def frac_id_from_fname(fname):
"""
Given a fraction filename, extracts the fracid
Returns:
A tuple (frac_num, frac_time_chunk)
"""
m = FRAC_ID_FROM_FNAME_RE.match(fname)
if m is None:
raise ValueError('Not a fraction filename %s' % fname)
return (int(m.group('frac_num')), int(m.group('frac_time_chunk')))
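# For example, following the '<frac_num>.<frac_time_chunk>.jdata' naming
# scheme described in the module docstring:
#
#     >>> frac_id_from_fname('/worldgrid/ndvi/jdata/4390.2.jdata')
#     (4390, 2)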
def load(gridroot):
return Header.load(gridroot)
class Header(object):
"""
Contains jGrid3 metadata and function to load part of the grid.
"""
def __init__(self, grid_root, width, height, frac_width, frac_height,
sr_wkt, dtype, geot, shape, timestamps_ms=None,
meta=None, nodataval=None, frac_ndates=None):
"""
Args:
grid_root: The grid root (e.g. fs:///worldgrid/ndvi
or hdfs:///worldgrid/ndvi)
width, height: the total size of the grid
frac_width, frac_height : fraction size
frac_ndates : fraction size along the time axis
sr_wkt: The spatial reference for the grid, as a WKT understood
by osr.
geot: GDAL Affine GeoTransform
(see http://www.gdal.org/gdal_datamodel.html)
Xgeo = geot[0] + Xpixel * geot[1] + Yline * geot[2]
Ygeo = geot[3] + Xpixel * geot[4] + Yline * geot[5]
                (so geot[2] and geot[4] should be 0 for north up images)
dtype: A numpy datatype describing the data in the grid
shape: The full shape of this grid (the first two dimensions are
redundant with height, width, but if the grid is nD,
we need the additional dimensions)
            timestamps_ms: A list of timestamps as ints in milliseconds. This is
saved to meta
meta: A dict of metadata values
nodataval: The nodata value. This is saved to meta
"""
assert shape[0] == height
assert shape[1] == width
self.shape = shape
if meta is None:
meta = {}
self.meta = meta
if timestamps_ms is not None:
self.meta['timestamps_ms'] = timestamps_ms
assert self.shape[2] == len(timestamps_ms)
if nodataval is not None:
self.meta['nodataval'] = nodataval
self.grid_root = grid_root
self.width = width
self.height = height
self.frac_width = frac_width
self.frac_height = frac_height
assert width % frac_width == 0,\
"width should be a multiple of frac_width"
assert height % frac_height == 0,\
"height should be a multiple of frac_height"
# Note that contrary to frac_width, frac_height, we support frac_ndates
# NOT dividing exactly the number of timestamps. This is necessary to
# be able to extend the fractions along the time axis later
if frac_ndates is not None:
self.frac_ndates = frac_ndates
else:
self.frac_ndates = self.shape[2]
if self.has_timestamps:
self.num_dates_fracs = int(
np.ceil(self.shape[2] / float(self.frac_ndates))
)
else:
self.num_dates_fracs = 1
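        # Worked example (assumed numbers): with shape[2] == 92 timestamps and
        # frac_ndates == 30, the time axis is split into
        # ceil(92 / 30) == 4 chunks, the last one holding only 2 slices.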
self.num_x_fracs = width // frac_width
self.num_y_fracs = height // frac_height
self.num_fracs = self.num_x_fracs * self.num_y_fracs
self.spatialref = osr.SpatialReference()
self.spatialref.ImportFromWkt(sr_wkt)
assert np.allclose(geot[2], 0), "geo_t[2] should be 0"
assert np.allclose(geot[4], 0), "geo_t[4] should be 0"
self.geot = geot
self.dtype = np.dtype(dtype)
wgs84_sr = osr.SpatialReference()
wgs84_sr.ImportFromEPSG(4326)
self.wgs84_to_sr = osr.CoordinateTransformation(
wgs84_sr, self.spatialref)
self.sr_to_wgs84 = osr.CoordinateTransformation(
self.spatialref, wgs84_sr)
@property
def has_timestamps(self):
return 'timestamps_ms' in self.meta
@property
def timestamps_ms(self):
if self.has_timestamps:
return np.array(self.meta['timestamps_ms'])
else:
return None
@property
def nodataval(self):
if 'nodataval' in self.meta:
return self.meta['nodataval']
else:
return None
def copy(self, root=None, dtype=None, shape=None, nodataval=None,
meta=None, frac_ndates=None):
"""
Return a copy of this header, with a possibly different root/dtype
"""
if root is None:
root = self.grid_root
if dtype is None:
dtype = self.dtype
if shape is None:
shape = self.shape
if nodataval is None:
nodataval = self.nodataval
if meta is None:
meta = copy.deepcopy(self.meta)
if frac_ndates is None:
frac_ndates = self.frac_ndates
return Header(
root, self.width, self.height, self.frac_width, self.frac_height,
self.spatialref.ExportToWkt(), dtype, self.geot, shape, meta=meta,
nodataval=nodataval, frac_ndates=frac_ndates)
def geot_for_xyfrom(self, xy_from):
"""
Given a (x, y) tuple, returns the geotransform that has its
top left coordinate at said pixel. This is useful in conjunction
with the xy_from reported from load_slice
"""
# Xgeo = geot[0] + Xpixel' * geot[1] + Yline' * geot[2]
# Ygeo = geot[3] + Xpixel' * geot[4] + Yline' * geot[5]
#
# Let Xpixel' = Xpixel + xy_from[0]
# Yline' = Yline + xy_from[1]
#
# (and xy_from is constant across pixels)
# Then, we have to modify geot[0] and geot[3] as follow :
# geot'[0] = geot[0] + xy_from[0] * geot[1] + xy_from[1] * geot[2]
# geot'[3] = geot[3] + xy_from[0] * geot[4] + xy_from[1] * geot[5]
#
new_geot = copy.deepcopy(self.geot)
new_geot[0] += xy_from[0] * self.geot[1] + xy_from[1] * self.geot[2]
new_geot[3] += xy_from[0] * self.geot[4] + xy_from[1] * self.geot[5]
return new_geot
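    # Numeric illustration (made-up geotransform): with
    # geot = [100.0, 0.25, 0.0, 50.0, 0.0, -0.25] and xy_from = (4, 8),
    # geot_for_xyfrom returns [101.0, 0.25, 0.0, 48.0, 0.0, -0.25]
    # (100 + 4 * 0.25 = 101 and 50 + 8 * -0.25 = 48).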
def xy2latlng(self, xy):
"""
Returns the latlng for the top left of the pixel at xy
"""
assert len(xy) == 2
# (gt[0], gt[3]) is the top left position of the top left pixel
x, y = xy
        # the small offset guarantees that latlng2xy(xy2latlng(xy)) maps back
        # to the same pixel
x += 1e-8
y += 1e-8
x_geo = self.geot[0] + x * self.geot[1] + y * self.geot[2]
y_geo = self.geot[3] + x * self.geot[4] + y * self.geot[5]
lng, lat, _ = self.sr_to_wgs84.TransformPoint(x_geo, y_geo)
return (lat, lng)
def latlng2xy(self, latlng):
lat, lng = latlng
x_geo, y_geo, _ = self.wgs84_to_sr.TransformPoint(lng, lat)
# This only works if self.geot[2] == self.geot[4] == 0
assert np.allclose(self.geot[2], 0)
assert np.allclose(self.geot[4], 0)
x = (x_geo - self.geot[0]) / self.geot[1]
y = (y_geo - self.geot[3]) / self.geot[5]
return (int(x), int(y))
def poly_latlng2xy(self, latlngs):
"""
        Convert a list of (lat, lng) tuples to (x, y) pixel coordinates
"""
return map(self.latlng2xy, latlngs)
def frac_num(self, frac_x, frac_y):
"""Given fraction coordinates, return its frac_num"""
return frac_y * self.num_x_fracs + frac_x
def x_start(self, frac_num):
return (frac_num % self.num_x_fracs) * self.frac_width
def x_end(self, frac_num):
return self.x_start(frac_num) + self.frac_width
def y_start(self, frac_num):
return (frac_num // self.num_x_fracs) * self.frac_height
def y_end(self, frac_num):
return self.y_start(frac_num) + self.frac_height
def frac_xyranges(self, frac_num):
return (self.x_start(frac_num), self.x_end(frac_num),
self.y_start(frac_num), self.y_end(frac_num))
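    # Worked example for the fraction layout helpers above (assumed grid):
    # with frac_width == frac_height == 100 and num_x_fracs == 5,
    # frac_num(2, 3) == 3 * 5 + 2 == 17, and for that fraction
    # frac_xyranges(17) == (200, 300, 300, 400)
    # since 17 % 5 == 2 and 17 // 5 == 3.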
def frac_time_range(self, frac_time_chunk):
"""
Returns: The time range this chunk covers as (time_start, time_end),
where time_end is exclusive
"""
t_from = frac_time_chunk * self.frac_ndates
t_to = min(self.shape[2], t_from + self.frac_ndates)
return (t_from, t_to)
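    # E.g. with frac_ndates == 30 and shape[2] == 92 (as in the example above),
    # frac_time_range(3) == (90, 92): the last chunk is clipped to the number
    # of available timestamps.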
def frac_fname(self, frac_id):
return os.path.join(self.grid_root, 'jdata', '%d.%d.jdata' % frac_id)
def frac_fnames_for_num(self, frac_num):
"""
        Returns the list of filenames for all the date slices of this fraction
program source
(http://www.freebsd.org/cgi/man.cgi?query=indent)
The options of the IndentFormatter are based on the union of
options of indent versions for FreeBSD, OpenBSD, OS X and GNU indent.
Many options will not be valid for the indent version that is used.
    The complaints about unknown options are registered and the offending options won't be
used for subsequent rounds of optimization.
"""
shortname = 'indent'
alternative_names = ['gindent']
base_optionname = 'indent_base_style'
columnlimitname = 'l'
configfilename = '.indent.pro'
# yapf: disable
opts = [('bacc', 'enum', ('bacc', 'nbacc')),
('bad', 'enum', ('bad', 'nbad')),
('bap', 'enum', ('bap', 'nbap')),
('bbb', 'enum', ('bbb', 'nbbb')),
('bbo', 'enum', ('bbo', 'nbbo')),
('bc', 'enum', ('bc', 'nbc')),
('bfda', 'enum', ('bfda', 'nbfda')),
('bfde', 'enum', ('bfde', 'nbfde')),
('bli', 'int', ()),
('br', 'enum', ('br', 'bl')),
('brf', 'enum', ('brf', 'blf')),
('brs', 'enum', ('brs', 'bls')),
('bs', 'enum', ('bs',)),
('c', 'int', ()),
('cbi', 'int', ()),
('cd', 'int', ()),
('cdb', 'enum', ('cdb', 'ncdb')),
('cdw', 'enum', ('cdw', 'ncdw')),
('ce', 'enum', ('ce', 'nce')),
('ci', 'int', ()),
('cli', 'int', ()),
('cp', 'int', ()),
('cs', 'enum', ('cs', 'ncs')),
('d', 'int', ()),
('di', 'int', ()),
('dj', 'enum', ('dj', 'ndj')),
('djn', 'enum', ('djn', 'ndjn')),
('eei', 'enum', ('eei', 'neei')),
('ei', 'enum', ('ei', 'nei')),
('fbs', 'enum', ('fbs', 'nfbs')),
('fc1', 'enum', ('fc1', 'nfc1')),
('fca', 'enum', ('fca', 'nfca')),
('fcb', 'enum', ('fcb', 'nfcb')),
('hnl', 'enum', ('hnl', 'nhnl')),
('i', 'int', ()),
('il', 'int', ()),
('ip', 'int', ()),
('l', 'int', ()),
('lc', 'int', ()),
('ldi', 'int', ()),
('lp', 'enum', ('lp', 'nlp')),
('lps', 'enum', ('lps', 'nlps')),
('npro', 'enum', ('npro',)),
('pcs', 'enum', ('pcs', 'npcs')),
('pi', 'int', ()),
('ppi', 'int', ()),
('prs', 'enum', ('prs', 'nprs')),
('psl', 'enum', ('psl', 'npsl')),
('saf', 'enum', ('saf', 'nsaf')),
('sai', 'enum', ('sai', 'nsai')),
('saw', 'enum', ('saw', 'nsaw')),
('sbi', 'int', ()),
('sc', 'enum', ('sc', 'nsc')),
('sob', 'enum', ('sob', 'nsob')),
('ss', 'enum', ('ss', 'nss')),
('st', 'enum', ('st',)),
('ts', 'int', ()),
('ut', 'enum', ('ut', 'nut'))]
# yapf: enable
def __init__(self, exe, cache=None):
super(IndentFormatter, self).__init__(exe, cache=cache)
@classmethod
def executable_names(cls):
# Change the order to prefer gindent instead of indent if available.
return cls.alternative_names + [cls.shortname]
def register_options(self):
styles = []
gnu_ident = self.prefer_basestyle
for optname, opttype, configs in self.opts:
if not gnu_ident and optname == 'ip':
                # For non-GNU indent, 'ip' is a boolean rather than a numeric option.
opttype, configs = 'enum', ('ip', 'nip')
styles.append(option_make(optname, opttype, configs))
if self.prefer_basestyle:
styles.append(option_make(self.base_optionname, 'enum', ('orig', 'linux', 'kr',
'gnu')))
self.styledefinition = styledef_make(styles)
@property
def prefer_basestyle(self):
# type: () -> bool
return self.version_string.startswith('GNU indent')
def cmdlineopt(self, optionname, value):
# type: (str, OptionValue) -> str
option = self.styledefinition[optionname]
styletype = option_type(option)
configs = option_configs(option)
if configs:
return "-%s" % value
if styletype == 'int':
return "-%s%s" % (optionname, value)
else:
raise ValueError
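    # Illustration of the two branches above (option names taken from `opts`):
    # an enum option is passed as its chosen value, e.g. cmdlineopt('bap', 'nbap')
    # returns '-nbap', while an int option concatenates name and value, e.g.
    # cmdlineopt('i', 4) returns '-i4'.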
def styletext(self, styles):
# type: (Style) -> str
fragments = []
for optionname, value in self.sorted_style(styles).items():
fragments.append(self.cmdlineopt(optionname, value))
return '\n'.join(fragments) + '\n'
def cmdargs_for_style(self, formatstyle, filename=None):
# type: (Style, Optional[str]) -> List[str]
assert isinstance(formatstyle, Style)
# -npro: ignore .indent.pro files
# -st: read source from stdin, write result to stdout
cmdargs = ['-npro', '-st']
for optname, value in sorted(formatstyle.items()):
cmdargs.append(self.cmdlineopt(optname, value))
return cmdargs
def should_report_error(self, job, jobres):
# type: (ExeCall, ExeResult) -> bool
if jobres.error is not None:
return True
if self.invalid_cmdline_option(job, jobres):
return INFO_INVALIDS in args_info
return jobres.returncode != 0 or bool(jobres.stderr)
def valid_job_result(self, job, jobres):
# type: (ExeCall, ExeResult) -> bool
return jobres.error is None and jobres.returncode == 0 and not bool(jobres.stderr)
def invalid_cmdline_option(self, job, jobres):
# type: (ExeCall, ExeResult) -> bool
if jobres.returncode != 1:
return False
# Handle the known error messages
msg = unistr(jobres.stderr)
if msg.startswith('command line: unknown option'):
return True
if msg.startswith('command line: option'):
return True
if msg.startswith('indent: bad font specification'):
return True
if msg.startswith('indent: ?: unknown parameter'):
return True
if msg.endswith('requires a parameter\n'):
return True
return False
def variants_for(self, option):
# type: (Option) -> List[Style]
def kvpairs(vs):
# type: (Iterable[OptionValue]) -> List[Style]
return stylevariants(stylename, vs)
stylename = option_name(option)
styletype = option_type(option)
configs = option_configs(option)
if configs:
return kvpairs(configs)
if stylename == self.columnlimitname:
return kvpairs(self.column_limit_candidates)
if styletype == 'int':
return kvpairs([0, 1, 2, 4, 8, 16])
return []
def reformat(self, sourcefile, destfile, configfile):
# type: (str, str, str) -> None
tmpdir = tempfile.mkdtemp(prefix='whatstyle_')
cfg = os.path.join(tmpdir, self.configfilename)
copyfile(configfile, cfg)
data = readbinary(sourcefile)
# -st: read source from stdin, write result to stdout
exeresult = run_executable(self.exe, ['-st'], stdindata=data)
writebinary(destfile, exeresult.stdout)
os.remove(cfg)
os.rmdir(tmpdir)
# ----------------------------------------------------------------------
class YapfFormatter(CodeFormatter):
"""Formatter for:
yapf: Formatter for Python code.
(https://github.com/google/yapf)
"""
shortname = 'yapf'
_prefer_basestyle = True
base_optionname = 'based_on_style'
columnlimitname = 'column_limit'
base_styles = 'pep8 chromium google facebook'.split()
configfilename = '.style.yapf'
styledump_argument = '--style-help'
def __init__(self, exe, cache=None):
super(YapfFormatter, self).__init__(exe, cache=cache)
@staticmethod
def typefromvalue(optvalue):
# type: (str) -> str
if optvalue in ['true', 'false']:
return 'bool'
try:
int(optvalue)
return 'int'
except ValueError:
pass
return 'string'
def register_options(self):
# type: () -> None
"""Parse options from text like this
ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=True
Align closing bracket with visual indentation.
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=False
Insert a blank line before a 'def' or 'class' immediately nested
"""
styles = [option_make(self.base_optionname, 'string', self.base_styles)]
for optname, optvalue in self.iter_options(style_make()):
styles.append(option_make(optname, self.typefromvalue(optvalue), tuple()))
self.styledefinition = styledef_make(styles)
def effective_style(self, style):
# type: (Style) -> Style
stylevalues = style_make()
for optname, optvalue in self.iter_options(style):
set_option(stylevalues, optname, optvalue)
return stylevalues
def iter_options(self, style):
# type: (Style) -> Iterator[TextPair]
dump = self.style_dump(style)
for optname, optvalue in parse_keyvalue_pairs(dump):
optname = optname.lower()
optvalue = optvalue.lower()
yield optname, optvalue
def styletext(self, styles):
# type: (Style) -> str
fragments = ['[style]']
for optionname, value in self.sorted_style(styles).items():
fragments.append('%s = %s' % (optionname, textrepr(value)))
return '\n'.join(fragments) + '\n'
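    # Sketch of the generated configuration (assuming textrepr renders these
    # values verbatim): styletext({'based_on_style': 'pep8', 'column_limit': 79})
    # would yield something like
    #     [style]
    #     based_on_style = pep8
    #     column_limit = 79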
def cmdargs_for_style(self, formatstyle, filename=None):
# type: (Style, Optional[str]) -> List[str]
assert isinstance(formatstyle, Style)
inlinestyle = self.inlinestyletext(formatstyle)
cmdargs = ['--no-local-style', '--style=%s' % inlinestyle]
if filename is not None:
cmdargs.append(filename)
return cmdargs
def should_report_error(self, job, jobres):
# type: (ExeCall, ExeResult) -> bool
# Yapf exits with code 2 when the reformatted output is different
# from the input and with code 0 when the output is unchanged.
if jobres.error is not None:
return True
return jobres.returncode not in [0, 2] or bool(jobres.stderr)
def valid_job_result(self, job, jobres):
# type: (ExeCall, ExeResult) -> bool
return jobres.error is None and jobres.returncode in [0, 2] and not jobres.stderr
def variants_for(self, option):
# type: (Option) -> List[Style]
def kvpairs(vs):
# type: (Iterable[OptionValue]) -> List[Style]
return stylevariants(stylename, vs)
stylename = option_name(option)
styletype = option_type(option)
configs = option_configs(option)
if configs:
return kvpairs(configs)
if styletype == 'bool':
return kvpairs([True, False])
if styletype == 'int':
if stylename == 'column_limit':
# Here we can get weird results, for example
# in bottle_sqlalchemy.py is a constructor with
# 8 arguments which are already split between two lines.
# We find an optimum column limit of 126 because this
# has less diff lines than putting each argument on a new
# line. Maybe we should use a different diff metric.
return kvpairs(self.column_limit_candidates)
elif stylename == 'indent_width':
return kvpairs([2, 4, 8])
elif stylename == 'spaces_before_comment':
return kvpairs(inclusiverange(1, 4))
elif stylename.startswith('split_penalty'):
# We avoid changing large integers whose purpose
# is not exactly clear for the moment.
pass
return []
# ----------------------------------------------------------------------
class HtmlTidyFormatter(CodeFormatter):
"""Formatter for:
Tidy - The granddaddy of HTML tools.
(http://www.html-tidy.org)
"""
shortname = 'tidy'
columnlimitname = 'wrap'
configfilename = 'tidy.conf'
styledump_argument = '-show-config'
def __init__(self, exe, cache=None):
super(HtmlTidyFormatter, self).__init__(exe, cache=cache)
style = style_make()
set_option(style, 'indent', 'yes')
self.initial_style = style
def register_options(self):
# type: () -> None
"""Parse options from XML like this:
<?xml version="1.0"?>
<config version="5.1.25">
<option class="print">
<name>indent-spaces</name>
<type>Integer</type>
<default>2</default>
<example>0, 1, 2, ...</example>
<description>This option specifies the number of spaces or tabs that
Tidy uses to indent content when <code>indent</code> is enabled.
<br/>Note that the default value for this option is dependent
upon the value of <code>indent-with-tabs</code> (see also).
</description>
<seealso>indent</seealso>
</option>
</config>
"""
exeresult = run_executable(self.exe, ['-xml-config'], cache=self.cache)
buf = BytesIO(exeresult.stdout)
optionname = None # type: Optional[str]
optiontype = None
example = None
options = []
for event, elem in ETree.iterparse(buf, events=('start', 'end')):
tag = elem.tag
if event == 'end':
if optionname is not None and tag == 'option':
# First ignore some options
if optionname.startswith('show-'):
continue
| |
used to find IPv4 network overlaps
. overlapping addresses in stdout will be highlighted with 'Match Found!'
"""
parser = argparse.ArgumentParser(description='VPC Report Generator', epilog=argv_epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-filename', '-f', nargs=1, help='Input JSON filename (override read from AWS)')
parser.add_argument('-profile', nargs=1, help='AWSCLI profile to use (override environment settings)')
parser.add_argument('-region', nargs=1, help='AWS Region to report on (override environment settings)')
parser.add_argument('-vpc-ids', nargs='+', metavar='vpc-id', help='When specified, limits to one or more VPC IDs versus all VPCs in the Region')
parser.add_argument('-j', '-json', nargs='?', const='NOFILE', metavar='filename', help='Output JSON to stdout (unless -w specified), optionally specify filename to override vpc-repr.json')
parser.add_argument('-split', nargs='?', const='name', choices=['name', 'id'], metavar='name|id', help='Instead of stdout, output JSON/HTML is written to multiple files using VPC name or id as filename')
parser.add_argument('-w', '-web', action='store_true', help='Output HTML to stdout')
parser.add_argument('-ip', nargs=1, help='IP Search - enter IP Address or Network with prefix (e.g. 10.10.10.10 or 10.10.10.0/24)')
    parser.add_argument('-verbose', action='store_true', help='Display additional help on -ip switch and command line examples')
parser.add_argument('-az', action='store_true', help='Show Availability Zones')
parser.add_argument('-ci', action='store_true', help='Show CIDR Blocks')
parser.add_argument('-do', action='store_true', help='Show DHCP Options')
parser.add_argument('-ep', action='store_true', help='Show Endpoints')
parser.add_argument('-gw', action='store_true', help='Show Gateways')
parser.add_argument('-na', action='store_true', help='Show NACLs')
parser.add_argument('-ni', action='store_true', help='Show Network Interfaces')
parser.add_argument('-pc', action='store_true', help='Show Peering Connections')
parser.add_argument('-rt', action='store_true', help='Show Route Tables')
parser.add_argument('-ta', action='store_true', help='Show VPC Tags')
parser.add_argument('-sh', action='store_true', help='Show Sharing - reserved for future use')
parser.add_argument('-sn', action='store_true', help='Show Subnets')
parser.add_argument('-sg', action='store_true', help='Show Security Groups')
parser.add_argument('-vp', action='store_true', help='Show VPNs - reserved for future use')
args = parser.parse_args()
# -vp or -sh
if args.sh is True or args.vp is True:
raise AttributeError('-sh and -vp switches are not yet supported')
# -verbose
if args.verbose is True:
print(argv_epilog, argv_verbose)
sys.exit()
# input filename and it exists?
if args.filename is not None:
read_mode = True
if not os.path.exists(args.filename[0]):
raise FileNotFoundError(f'{args.filename[0]} not found')
# open output -j filename or fail
if read_mode is False:
if args.j is not None and args.j != "NOFILE":
json_fh = open(args.j, mode='w')
else:
json_fh = open('vpc-repr.json', mode='w')
# set stdout output indicators
out = "json"
    if args.w is True or args.j is None:
out = "html"
## get input
# read from json file
if read_mode is True:
vpc_fh = open(args.filename[0], mode='r')
vpcs = json.load(vpc_fh)
vpc_fh.close()
repr_title = "unknown region"
if "Region" in vpcs:
repr_title = vpcs['Region']
region = repr_title
repr_date = os.path.getmtime(args.filename[0])
repr_date = datetime.datetime.fromtimestamp(repr_date)
repr_date = repr_date.strftime("%m/%d/%y %I:%M %p")
repr_title += f" ({args.filename[0]} saved on {repr_date})"
# or get data from AWS
else:
# determine session parameters, start session, get client
region = None
profile = None
if args.region:
region = args.region[0]
if args.profile:
profile = args.profile[0]
new_session = boto3.Session(region_name=region, profile_name=profile)
repr_title = new_session.region_name
ec2 = new_session.client('ec2')
# get vpc list (from argv or all)
if args.vpc_ids is None:
vpcs = ec2.describe_vpcs()
else:
vpcs = ec2.describe_vpcs(VpcIds=args.vpc_ids)
# remove http response data from dict
del vpcs['ResponseMetadata']
# add region to vpcs to save in json file
vpcs['Region'] = repr_title
# get prefix list definitions
prefix_dict = {}
tmp = ec2.describe_prefix_lists()
for prefix in tmp['PrefixLists']:
prefix_dict[prefix['PrefixListId']] = prefix['PrefixListName']
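        # prefix_dict maps a PrefixListId to its human-readable name so routes
        # can be annotated later, e.g. (hypothetical values)
        # {'pl-63a5400a': 'com.amazonaws.us-east-1.s3'}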
# get the az list once
az_dict = ec2.describe_availability_zones()
del az_dict['ResponseMetadata']
## collect items for vpcs in the opposite direction (i.e. item -> vpc versus vpc -> item)
# egress only internet gateways
eoig_dict = ec2.describe_egress_only_internet_gateways()
del eoig_dict['ResponseMetadata']
# internet gateways
ig_dict = ec2.describe_internet_gateways()
del ig_dict['ResponseMetadata']
# nat gateways
ng_dict = ec2.describe_nat_gateways()
del ng_dict['ResponseMetadata']
# eliminate default vpc
for i in range(len(vpcs['Vpcs'])):
if vpcs['Vpcs'][i]['IsDefault'] is True:
del vpcs['Vpcs'][i]
break
## process all vpcs to build each vpc object
for vpc in vpcs['Vpcs']:
# get id for current vpc
vpc_id = vpc['VpcId']
# save subnet names for other sections to use
sn_names_dict = {} # sn_id -> sn_name
tmp = ec2.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
for sn in tmp['Subnets']:
# get subnet name from tags if it exists
sn_names_dict[sn['SubnetId']] = get_tag_name(sn)
# azs
vpc['AvailabilityZones'] = az_dict['AvailabilityZones']
# dhcp options
if 'DhcpOptionsId' in vpc and vpc['DhcpOptionsId'] != "":
tmp = ec2.describe_dhcp_options(DhcpOptionsIds=[vpc['DhcpOptionsId']])
vpc['DhcpOptions'] = tmp['DhcpOptions']
# egress only internet gateways
for g in eoig_dict['EgressOnlyInternetGateways']:
for a in g['Attachments']:
if a['VpcId'] == vpc_id:
vpc['EgressOnlyInternetGateways'] = [g]
# internet gateway
for g in ig_dict['InternetGateways']:
for a in g['Attachments']:
if a['VpcId'] == vpc_id:
vpc['InternetGateways'] = [g]
# nat gateways
for g in ng_dict['NatGateways']:
if g['VpcId'] == vpc_id:
if "NatGateways" in vpc:
vpc['NatGateways'].append(g)
else:
vpc['NatGateways'] = [g]
# transit gateways
# get tgw's for vpc and dedupe the ids
tg_ids = {}
tmp = ec2.describe_transit_gateway_vpc_attachments(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
for i in range(len(tmp['TransitGatewayVpcAttachments'])):
tg_ids[tmp['TransitGatewayVpcAttachments'][i]['TransitGatewayId']] = True
if len(tg_ids) > 0:
tmp = ec2.describe_transit_gateways(TransitGatewayIds=sorted(tg_ids.keys()))
vpc['TransitGateways'] = tmp['TransitGateways']
# network interfaces
tmp = ec2.describe_network_interfaces(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
if len(tmp['NetworkInterfaces']) != 0:
vpc['NetworkInterfaces'] = tmp['NetworkInterfaces']
# subnets
tmp = ec2.describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
if len(tmp['Subnets']) != 0:
vpc['Subnets'] = tmp['Subnets']
# vpc endpoints
tmp = ec2.describe_vpc_endpoints(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
if len(tmp['VpcEndpoints']) != 0:
vpc['VpcEndpoints'] = tmp['VpcEndpoints']
# vpc peering connections (requester-vpc-info.vpc-id / accepter-vpc-info.vpc-id) - move from routes
pcrs = ec2.describe_vpc_peering_connections(Filters=[{'Name': 'requester-vpc-info.vpc-id', 'Values': [f"{vpc_id}"]}])
pcas = ec2.describe_vpc_peering_connections(Filters=[{'Name': 'accepter-vpc-info.vpc-id', 'Values': [f"{vpc_id}"]}])
        # Dedupe peering records with the same connection id
tmp = {}
if len(pcrs['VpcPeeringConnections']) != 0:
for pcr in pcrs['VpcPeeringConnections']:
tmp[pcr['VpcPeeringConnectionId']] = pcr
if len(pcas['VpcPeeringConnections']) != 0:
for pca in pcas['VpcPeeringConnections']:
tmp[pca['VpcPeeringConnectionId']] = pca
if len(tmp) != 0:
vpc['VpcPeeringConnections'] = []
for pc in tmp.values():
vpc['VpcPeeringConnections'].append(pc)
# route tables
tmp = ec2.describe_route_tables(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
for rt in tmp['RouteTables']:
for r in rt['Routes']:
notes = ""
if "DestinationPrefixListId" in r:
notes += prefix_dict[r['DestinationPrefixListId']] + " "
if "InstanceId" in r:
notes += "Instance ID "
if "TransitGatewayId" in r:
gwa = ec2.describe_transit_gateway_vpc_attachments(Filters=[
{'Name': 'transit-gateway-id', 'Values': [f"{r['TransitGatewayId']}"]},
{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}
])
for i in range(len(gwa['TransitGatewayVpcAttachments'])):
notes += f"{gwa['TransitGatewayVpcAttachments'][i]['TransitGatewayAttachmentId']} ({gwa['TransitGatewayVpcAttachments'][i]['State']}) "
if "LocalGatewayId" in r:
lgws = ec2.describe_local_gateways(LocalGatewayIds=[r['LocalGatewayId']])
notes += f"Local Gateway - Outpost ARN (State): {lgws['LocalGateways'][0]['OutpostArn']} ({lgws['LocalGateways'][0]['State']}) "
if "CarrierGatewayId" in r:
cgws = ec2.describe_carrier_gateways(CarrierGatewayIds=[r['CarrierGatewayId']])
notes += f"Carrier Gateway - State: {cgws['CarrierGateways'][0]['State']} "
if "VpcPeeringConnectionId" in r:
notes += f"VPC Peering Connection "
if notes != "":
r['Notes'] = notes
if len(tmp['RouteTables']) != 0:
vpc['RouteTables'] = tmp['RouteTables']
# security groups
tmp = ec2.describe_security_groups(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
for sg in tmp['SecurityGroups']:
# get rules
sgrs = ec2.describe_security_group_rules(Filters=[{'Name': 'group-id', 'Values': [sg['GroupId']]}])
if len(sgrs) != 0:
sg['SecurityGroupRules'] = []
for sgr in sgrs['SecurityGroupRules']:
sg['SecurityGroupRules'].append(sgr)
if len(tmp['SecurityGroups']) != 0:
vpc['SecurityGroups'] = tmp['SecurityGroups']
# nacls
tmp = ec2.describe_network_acls(Filters=[{'Name': 'vpc-id', 'Values': [f"{vpc_id}"]}])
if len(tmp['NetworkAcls']) != 0:
vpc['NetworkAcls'] = tmp['NetworkAcls']
# write out vpcs to json output file handle
json.dump(vpcs, json_fh, indent=2, default=datetime_handler)
# section limited stdout?
if True in [args.az, args.ci, args.do, args.ep, args.gw, args.na, args.ni, args.pc, args.rt, args.sg, args.sh, args.sn, args.ta, args.vp]:
for vpc in vpcs['Vpcs']:
if not args.az and "AvailabilityZones" in vpc:
del vpc['AvailabilityZones']
if not args.ci and "CidrBlock" in vpc:
del vpc['CidrBlock']
if not args.ci and "Ipv6CidrBlockAssociationSet" in vpc:
del vpc['Ipv6CidrBlockAssociationSet']
if not args.ci and "CidrBlockAssociationSet" in vpc:
del vpc['CidrBlockAssociationSet']
if not args.do and "DhcpOptions" in vpc:
del vpc['DhcpOptions']
if not args.ep and "VpcEndpoints" in vpc:
del vpc['VpcEndpoints']
if not args.gw and "EgressOnlyInternetGateways" in vpc:
del vpc['EgressOnlyInternetGateways']
if not args.gw and "InternetGateways" in vpc:
del vpc['InternetGateways']
if not args.gw and "NatGateways" in vpc:
del vpc['NatGateways']
if not args.gw and "TransitGateways" in vpc:
del vpc['TransitGateways']
if not args.na and "NetworkAcls" in vpc:
del vpc['NetworkAcls']
if not args.ni and "NetworkInterfaces" in vpc:
del vpc['NetworkInterfaces']
if not args.pc and "VpcPeeringConnections" in vpc:
del vpc['VpcPeeringConnections']
if not args.rt and "RouteTables" in vpc:
del vpc['RouteTables']
if not args.sg and "SecurityGroups" in vpc:
del vpc['SecurityGroups']
if not args.sh:
pass
if not args.sn and "Subnets" in vpc:
del vpc['Subnets']
if not args.ta and "Tags" in vpc:
del vpc['Tags']
if not args.vp:
pass
# ip/network search
if args.ip is not None:
ip = None
network = None
try:
network = ipaddress.ip_network(args.ip[0])
except:
network = None
if ip is None and network is None:
print(f"\nUnable to process ip string provided: {args.ip[0]}\n")
sys.exit(2)
# convert IP to network, classify input ipv4 or ipv6
# there is no tool in ipaddress module to | |
# source repository: gleichdick/rosdep
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author <NAME>/<EMAIL>
from __future__ import print_function
import os
import sys
import yaml
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen
from urllib2 import URLError
try:
import cPickle as pickle
except ImportError:
import pickle
from .cache_tools import compute_filename_hash, PICKLE_CACHE_EXT, write_atomic, write_cache_file
from .core import InvalidData, DownloadFailure, CachePermissionError
from .gbpdistro_support import get_gbprepo_as_rosdep_data, download_gbpdistro_as_rosdep_data
from .meta import MetaDatabase
try:
import urlparse
except ImportError:
import urllib.parse as urlparse # py3k
try:
import httplib
except ImportError:
import http.client as httplib # py3k
import rospkg
import rospkg.distro
from .loader import RosdepLoader
from .rosdistrohelper import get_index, get_index_url
# default file to download with 'init' command in order to bootstrap
# rosdep
DEFAULT_SOURCES_LIST_URL = 'https://raw.githubusercontent.com/ros/rosdistro/master/rosdep/sources.list.d/20-default.list'
# seconds to wait before aborting download of rosdep data
DOWNLOAD_TIMEOUT = 15.0
SOURCES_LIST_DIR = 'sources.list.d'
SOURCES_CACHE_DIR = 'sources.cache'
# name of index file for sources cache
CACHE_INDEX = 'index'
# extension for binary cache
SOURCE_PATH_ENV = 'ROSDEP_SOURCE_PATH'
def get_sources_list_dirs(source_list_dir):
if SOURCE_PATH_ENV in os.environ:
sdirs = os.environ[SOURCE_PATH_ENV].split(os.pathsep)
else:
sdirs = [source_list_dir]
for p in list(sdirs):
if not os.path.exists(p):
sdirs.remove(p)
return sdirs
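# For example (paths are hypothetical): with
# ROSDEP_SOURCE_PATH=/etc/ros/rosdep/sources.list.d:/opt/extra/sources.list.d
# only the entries that actually exist on disk are returned, in that order.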
def get_sources_list_dir():
# base of where we read config files from
# TODO: windows
if 0:
# we can't use etc/ros because environment config does not carry over under sudo
etc_ros = rospkg.get_etc_ros_dir()
else:
etc_ros = '/etc/ros'
# compute default system wide sources directory
sys_sources_list_dir = os.path.join(etc_ros, 'rosdep', SOURCES_LIST_DIR)
sources_list_dirs = get_sources_list_dirs(sys_sources_list_dir)
if sources_list_dirs:
return sources_list_dirs[0]
else:
return sys_sources_list_dir
def get_default_sources_list_file():
return os.path.join(get_sources_list_dir(), '20-default.list')
def get_sources_cache_dir():
ros_home = rospkg.get_ros_home()
return os.path.join(ros_home, 'rosdep', SOURCES_CACHE_DIR)
# Default rosdep.yaml format. For now this is the only valid type and
# is specified for future compatibility.
TYPE_YAML = 'yaml'
# git-buildpackage repo list
TYPE_GBPDISTRO = 'gbpdistro'
VALID_TYPES = [TYPE_YAML, TYPE_GBPDISTRO]
class DataSource(object):
def __init__(self, type_, url, tags, origin=None):
"""
:param type_: data source type, e.g. TYPE_YAML, TYPE_GBPDISTRO
:param url: URL of data location. For file resources, must
start with the file:// scheme. For remote resources, URL
must include a path.
:param tags: tags for matching data source to configurations
:param origin: filename or other indicator of where data came from for debugging.
:raises: :exc:`ValueError` if parameters do not validate
"""
# validate inputs
if type_ not in VALID_TYPES:
raise ValueError('type must be one of [%s]' % (','.join(VALID_TYPES)))
parsed = urlparse.urlparse(url)
if not parsed.scheme or (parsed.scheme != 'file' and not parsed.netloc) or parsed.path in ('', '/'):
raise ValueError('url must be a fully-specified URL with scheme, hostname, and path: %s' % (str(url)))
if not type(tags) == list:
raise ValueError('tags must be a list: %s' % (str(tags)))
self.type = type_
self.tags = tags
self.url = url
self.origin = origin
def __eq__(self, other):
return isinstance(other, DataSource) and \
self.type == other.type and \
self.tags == other.tags and \
self.url == other.url and \
self.origin == other.origin
def __str__(self):
if self.origin:
return '[%s]:\n%s %s %s' % (self.origin, self.type, self.url, ' '.join(self.tags))
else:
return '%s %s %s' % (self.type, self.url, ' '.join(self.tags))
def __repr__(self):
return repr((self.type, self.url, self.tags, self.origin))
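# Construction sketch, mirroring the sources-list line format shown further
# below ('yaml http://foo/rosdep.yaml fuerte lucid ubuntu'); the URL, tags and
# origin are illustrative:
#
#     source = DataSource('yaml', 'http://foo/rosdep.yaml',
#                         ['fuerte', 'lucid', 'ubuntu'], origin='20-default.list')
#     # str(source) gives '[20-default.list]:' followed by
#     # 'yaml http://foo/rosdep.yaml fuerte lucid ubuntu'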
class RosDistroSource(DataSource):
def __init__(self, distro):
self.type = TYPE_GBPDISTRO
self.tags = [distro]
# In this case self.url is a list if REP-143 is being used
self.url = get_index().distributions[distro]['distribution']
self.origin = None
# create function we can pass in as model to parse_source_data. The
# function emulates the CachedDataSource constructor but does the
# necessary full filepath calculation and loading of data.
def cache_data_source_loader(sources_cache_dir, verbose=False):
def create_model(type_, uri, tags, origin=None):
        # compute the filename hash from the URL
filename = compute_filename_hash(uri)
filepath = os.path.join(sources_cache_dir, filename)
pickle_filepath = filepath + PICKLE_CACHE_EXT
if os.path.exists(pickle_filepath):
if verbose:
print('loading cached data source:\n\t%s\n\t%s' % (uri, pickle_filepath), file=sys.stderr)
with open(pickle_filepath, 'rb') as f:
rosdep_data = pickle.loads(f.read())
elif os.path.exists(filepath):
if verbose:
print('loading cached data source:\n\t%s\n\t%s' % (uri, filepath), file=sys.stderr)
with open(filepath) as f:
rosdep_data = yaml.safe_load(f.read())
else:
rosdep_data = {}
return CachedDataSource(type_, uri, tags, rosdep_data, origin=filepath)
return create_model
class CachedDataSource(object):
def __init__(self, type_, url, tags, rosdep_data, origin=None):
"""
Stores data source and loaded rosdep data for that source.
        NOTE: this is not a subclass of DataSource, though its API is
duck-type compatible with the DataSource API.
"""
self.source = DataSource(type_, url, tags, origin=origin)
self.rosdep_data = rosdep_data
def __eq__(self, other):
try:
return self.source == other.source and \
self.rosdep_data == other.rosdep_data
except AttributeError:
return False
def __str__(self):
return '%s\n%s' % (self.source, self.rosdep_data)
def __repr__(self):
return repr((self.type, self.url, self.tags, self.rosdep_data, self.origin))
@property
def type(self):
"""
:returns: data source type
"""
return self.source.type
@property
def url(self):
"""
:returns: data source URL
"""
return self.source.url
@property
def tags(self):
"""
:returns: data source tags
"""
return self.source.tags
@property
def origin(self):
"""
:returns: data source origin, if set, or ``None``
"""
return self.source.origin
class DataSourceMatcher(object):
def __init__(self, tags):
self.tags = tags
def matches(self, rosdep_data_source):
"""
Check if the datasource matches this configuration.
:param rosdep_data_source: :class:`DataSource`
"""
# all of the rosdep_data_source tags must be in our matcher tags
return not any(set(rosdep_data_source.tags) - set(self.tags))
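    # Worked example: a matcher built with tags ['fuerte', 'ubuntu', 'lucid']
    # matches a data source tagged ['ubuntu', 'lucid'] (a subset) but not one
    # tagged ['osx'], because 'osx' is not among the matcher tags.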
@staticmethod
def create_default(os_override=None):
"""
Create a :class:`DataSourceMatcher` to match the current
configuration.
:param os_override: (os_name, os_codename) tuple to override
OS detection
:returns: :class:`DataSourceMatcher`
"""
distro_name = rospkg.distro.current_distro_codename()
if os_override is None:
os_detect = rospkg.os_detect.OsDetect()
os_name, os_version, os_codename = os_detect.detect_os()
else:
os_name, os_codename = os_override
tags = [t for t in (distro_name, os_name, os_codename) if t]
return DataSourceMatcher(tags)
def download_rosdep_data(url):
"""
:raises: :exc:`DownloadFailure` If data cannot be
retrieved (e.g. 404, bad YAML format, server down).
"""
try:
f = urlopen(url, timeout=DOWNLOAD_TIMEOUT)
text = f.read()
f.close()
data = yaml.safe_load(text)
if type(data) != dict:
raise DownloadFailure('rosdep data from [%s] is not a YAML dictionary' % (url))
return data
except (URLError, httplib.HTTPException) as e:
raise DownloadFailure(str(e) + ' (%s)' % url)
except yaml.YAMLError as e:
raise DownloadFailure(str(e))
def download_default_sources_list(url=DEFAULT_SOURCES_LIST_URL):
"""
Download (and validate) contents of default sources list.
:param url: override URL of default sources list file
:return: raw sources list data, ``str``
:raises: :exc:`DownloadFailure` If data cannot be
retrieved (e.g. 404, bad YAML format, server down).
:raises: :exc:`urllib2.URLError` If data cannot be
retrieved (e.g. 404, server down).
"""
try:
f = urlopen(url, timeout=DOWNLOAD_TIMEOUT)
except (URLError, httplib.HTTPException) as e:
raise URLError(str(e) + ' (%s)' % url)
data = f.read().decode()
f.close()
if not data:
raise DownloadFailure('cannot download defaults file from %s : empty contents' % url)
# parse just for validation
try:
parse_sources_data(data)
except InvalidData as e:
raise DownloadFailure(
'The content downloaded from %s failed to pass validation.'
' It is likely that the source is invalid unless the data was corrupted during the download.'
' The contents were:{{{%s}}} The error raised was: %s' % (url, data, e))
return data
def parse_sources_data(data, origin='<string>', model=None):
"""
Parse sources file format (tags optional)::
# comments and empty lines allowed
<type> <uri> [tags]
e.g.::
yaml http://foo/rosdep.yaml fuerte lucid ubuntu
If tags are specified, *all* tags must match the current
configuration for the sources data to be used.
    :param data: data in sources file format
tok_list, _global_call):
single_line = False
left_brack = ""
right_brack = ""
str_to_ret = ""
if tok_list[0] in [IF, ELIF, ELSE]:
if tok_list[0] == ELIF:
tok_list[0] = ELSE_IF
if tok_list[0] != ELSE:
if tok_list[1] != LEFTBRACK:
left_brack = LEFTBRACK
right_brack = RIGHTBRACK
str_to_ret += tok_list[0] + SPACE + left_brack
# Multi-line body
if LEFTCURL in tok_list[1:]:
# print(tok_list[1:])
_val, _type = self.__eval_assign_values(
vars_dict, tok_list[1:], _global_call, LEFTCURL)
str_to_ret += _val + right_brack + NEWLINE + LEFTCURL + NEWLINE
# @ TODO
# # Single-line body
# else:
# single_line = True
# _idx = tok_list[1:].index(SEMI)
# _val, _type = self.__eval_assign_values(
# vars_dict, tok_list[1:_idx+1], _global_call, tok_list[_idx])
# str_to_ret += _val + right_brack + \
# SPACE + tok_list[_idx] + SEMI + NEWLINE
# Else Condition
else:
# Multi-line body
if LEFTCURL in tok_list[1:]:
str_to_ret += tok_list[0] + SPACE + LEFTCURL + NEWLINE
# @ TODO
# # Single-line body
# else:
# single_line = True
# _val, _type = self.__eval_assign_values(vars_dict, tok_list[1:], _global_call, SEMI)
# str_to_ret += tok_list[0] + SPACE + _val + SEMI + NEWLINE
if not single_line:
start = tok_list.index(LEFTCURL)+1
sub_toks = []
else:
start = tok_list.index(SEMI)+1
sub_toks = []
while start < len(tok_list):
if tok_list[start] in [IF, ELIF, ELSE]:
str_to_ret += self.__if_elif_else(vars_dict,
tok_list[start:], _global_call)
break
elif tok_list[start] == LOOP:
str_to_ret += self.__loop_until_for(
vars_dict, tok_list[start:], _global_call)
break
elif tok_list[start] == FOR:
str_to_ret += self.__for(vars_dict, tok_list[start:], _global_call)
break
elif tok_list[start] == RIGHTCURL:
str_to_ret += tok_list[start]
start += 1
elif tok_list[start] != SEMI:
sub_toks.append(tok_list[start])
start += 1
elif tok_list[start] == SEMI:
sub_toks.append(tok_list[start])
str_to_ret += self._convert_to_c_str(
[sub_toks], vars_dict, _global_call)
sub_toks.clear()
start += 1
return str_to_ret
def __sleep(self, vars_dict, tok_list, _global_call):
_val, _type = self.__eval_assign_values(
vars_dict, tok_list, _global_call, SEMI)
if _val[0] == LEFTBRACK and _val[-1] == RIGHTBRACK:
return f"sleep{_val};"
else:
return f"sleep({_val});\n"
def __usleep(self, vars_dict, tok_list, _global_call):
_val, _type = self.__eval_assign_values(
vars_dict, tok_list, _global_call, SEMI)
if _val[0] == LEFTBRACK and _val[-1] == RIGHTBRACK:
return f"usleep{_val};"
else:
return f"usleep({_val});\n"
def _in_func_names(self, name):
if name + "_" + self.bin_name in self._vars_dict["FUNCS"]:
return name + "_" + self.bin_name
return False
def __func(self, current_func, toks):
c_func_params = ""
current_func += "_" + self.bin_name
if not current_func in self._vars_dict["FUNCS"]:
func_type = toks[0]
self._vars_dict["FUNCS"].update({current_func: [(), {}]})
prms_and_ret = toks[toks.index(LEFTBRACK): toks.index(LEFTCURL)]
params = prms_and_ret[prms_and_ret.index(
LEFTBRACK)+1: prms_and_ret.index(RIGHTBRACK)]
func_body_toks = toks[toks.index(LEFTCURL)+1:-1]
# Extract return type and value
return_type = prms_and_ret[-1]
ret_val = ""
_sub_type = False
if return_type == COLON or return_type == RIGHTBRACK:
return_type = VOID
if return_type.endswith("[]"):
return_type = return_type[:return_type.find(LEFTSQUARE)]
_sub_type = "__LIST__"
if return_type == INT:
return_type = LONG
ret_val = "0"
elif return_type == FLOAT:
return_type = DOUBLE
ret_val = "0.0f"
elif return_type == STR:
ret_val = ""
# func_type can be `@` or `<`
self._vars_dict["FUNCS"][current_func][0] = [ret_val, return_type, _sub_type, func_type]
if params and params[0] != ":":
# Extract param vars and types and skip if no params
_idx = 0
while (_idx < len(params)):
var = params[_idx]
_type = params[_idx+2]
val = ""
_sub_t = False
if var.endswith("[]"):
var = var[:var.find(LEFTSQUARE)]
_sub_t = "__LIST__"
if _type == INT:
_type = LONG
val = "0"
elif _type == FLOAT:
_type = DOUBLE
val = "0.0f"
elif _type == STR:
val = ""
self._vars_dict["FUNCS"][current_func][1][var] = (val, _type, _sub_t)
if _type == STR:
_type = CHARSTAR
if _sub_t == "__LIST__":
c_func_params += f"{_type} *{var}"
else:
c_func_params += f"{_type} {var}"
# To exclude comma after last param
if _idx < len(params) - 3:
c_func_params += ", "
_idx += 4
start = 0
sub_toks = []
str_to_ret = ""
while start < len(func_body_toks):
if func_body_toks[start] in [IF, ELIF, ELSE]:
str_to_ret += self.__if_elif_else(
self._vars_dict["FUNCS"][current_func][1], func_body_toks[start:], _global_call=False)
break
elif func_body_toks[start] == LOOP:
str_to_ret += self.__loop_until_for(
self._vars_dict["FUNCS"][current_func][1], func_body_toks[start:], _global_call=False)
break
elif func_body_toks[start] == FOR:
str_to_ret += self.__for(self._vars_dict["FUNCS"][current_func][1], func_body_toks[start:], _global_call=False)
break
elif func_body_toks[start] != SEMI:
sub_toks.append(func_body_toks[start])
start += 1
elif func_body_toks[start] == SEMI:
sub_toks.append(func_body_toks[start])
str_to_ret += self._convert_to_c_str(
[sub_toks], self._vars_dict["FUNCS"][current_func][1], _global_call=False)
sub_toks.clear()
start += 1
if return_type == STR and _sub_type != "__LIST__":
return_type = CHARSTAR
elif return_type == STR and _sub_type == "__LIST__":
return_type = CHARSTAR + "*"
elif _sub_type == "__LIST__":
return_type += "*"
func = f"{return_type} {current_func}({c_func_params})" + \
NEWLINE + LEFTCURL + NEWLINE
func += str_to_ret + RIGHTCURL + NEWLINE
self._funcs_impl.append(func)
if func_type == FUNCTION:
self._private_func_list.append(
f"{return_type} {current_func}({c_func_params});")
elif func_type == PUB_FUNC:
self._public_func_list.append(
f"{return_type} {current_func}({c_func_params});")
def __return(self, vars_dict, tok_list):
ret_val, _type = self.__eval_assign_values(
vars_dict, tok_list, False, SEMI)
ret_val = RETURN + SPACE + ret_val + SEMI
return ret_val
def __string_parser(self, string, vars_dict, _global_call, new_line=False):
print_head = 'printf("'
print_tail = ');\n'
result = print_head
frmt = ""
values = ""
length = len(string)
left = 0
right = 0
box_started = False
while (right < length):
c = string[right]
if not box_started:
if c == LEFTCURL and string[right-1] != BACKSLASH:
left = right + 1
box_started = True
# elif c == BACKSLASH and string[right+1] == LEFTCURL or\
# string[right+1] == RIGHTCURL:
# # frmt += string[right+1]
# pass
elif c == NEWLINE:
frmt += "\\n"
# Get the values between box to determine var, func
# TODO support arithmetic operations inside box. EXAMPLES: [1+3], [`> ` * 3]
else:
frmt += c
else:
if c == RIGHTCURL and string[right-1] != BACKSLASH:
var = self.sub_string(string, left, right)
# Remove \[ and \] from substring
_li_conts = ""
if LEFTSQUARE in var or RIGHTSQUARE in var:
var = var.replace(BACKSLASH, "")
if LEFTSQUARE in var and RIGHTSQUARE in var:
_li_conts = var[var.find(LEFTSQUARE)+1:var.find(RIGHTSQUARE)]
_v, t = self.__eval_assign_values(vars_dict, [_li_conts, SEMI], _global_call, SEMI)
_li_conts = LEFTSQUARE + _v + RIGHTSQUARE
var = var[:var.find(LEFTSQUARE)]
# print(var)
var_list = [var, ()]
val, _type = self.__eval_assign_values(
vars_dict, var_list, _global_call, SEMI)
# print(val, _type)
# if _type.endswith("__LIST__"):
# _type = _type[:_type.find("__LIST__")]
# elif _type.endswith("__LIST_DY__"):
# _type = _type[:_type.find("__LIST_DY__")]
# elif _type.endswith("__LIST_ST__"):
# _type = _type[:_type.find("__LIST_ST__")]
# print(val, _type)
# Type
if _type == LONG:
if val.find(SUB) != -1:
frmt += "%d"
else:
frmt += "%ld"
elif _type == BOOL:
frmt += "%d"
elif _type == DOUBLE:
frmt += "%f"
elif _type == STR:
frmt += "%s"
if _li_conts:
# _li_conts += SEMI
# print(_li_conts)
# v = self._convert_to_c_str([[_li_conts]], vars_dict, _global_call)
# print(v)
values += COMA + val+_li_conts
else:
values += COMA + val
box_started = False
right += 1
result += frmt
if new_line:
result += '\\n"'
else:
result += '"'
result += values + print_tail
return result
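    # Rough illustration of the translation above (assuming `n` is a plain
    # long/int variable in vars_dict): the template string 'count = {n}\n'
    # would come out as C code along the lines of
    #     printf("count = %ld\n", n);
    # with {...} placeholders mapped to printf format specifiers by type.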
def __for(self, vars_dict, toks, _global_call):
str_to_ret = ""
_k = "_i_"
_v = ""
_obj = ""
for_body_toks = []
if toks[2] == IN:
_v = toks[1]
_obj = toks[3]
for_body_toks = toks[4:-1]
elif toks[4] == IN:
_k = toks[1]
_v = toks[3]
_obj = toks[5]
for_body_toks = toks[6:-1]
# print(for_body_toks)
_start = None
_end = None
if _obj.endswith(RIGHTSQUARE):
_sliced_str = _obj[_obj.find(LEFTSQUARE)+1:_obj.find(RIGHTSQUARE)]
_obj = _obj[:_obj.find(LEFTSQUARE)]
_v_, _t_ = self.__eval_assign_values(vars_dict, [_sliced_str, ()], _global_call, SEMI)
if _v_.find(COLON) != -1:
_start, _end = _v_.split(COLON)
val, _type = self.__eval_assign_values(vars_dict, [_obj, ()], _global_call, SEMI)
# print(val, _type)
if _end:
if _end.startswith(SUB):
_end = val + "_len" +_end
if _start:
if _start.startswith(SUB):
_start = val+"_len" + _start
vars_dict.update({_v: [_v, _type, "__FOR__"]})
for_body = ""
start = 1
sub_toks = []
while start < len(for_body_toks):
if for_body_toks[start] == FOR:
for_body += self.__for(vars_dict, for_body_toks[start:], _global_call)
break
elif for_body_toks[start] in [IF, ELIF, ELSE]:
for_body += self.__if_elif_else(vars_dict,
for_body_toks[start:], _global_call)
break
elif for_body_toks[start] == LOOP:
for_body += self.__loop_until_for(
vars_dict, for_body_toks[start:], _global_call)
break
elif for_body_toks[start] == RIGHTCURL:
for_body += for_body_toks[start]
start += 1
elif for_body_toks[start] != SEMI:
sub_toks.append(for_body_toks[start])
start += 1
elif for_body_toks[start] == SEMI:
sub_toks.append(for_body_toks[start])
for_body += self._convert_to_c_str(
[sub_toks], vars_dict, _global_call)
sub_toks.clear()
start += 1
if _type == STR:
_type = CHARSTAR
str_to_ret += access_elem_by_ref(_type, _k, _v, val, for_body, _start, _end)
return str_to_ret
def _convert_to_c_str(self, tokens, vars_dict, _global_call=True):
c_str = ""
# print(tokens)
tokens = iter(tokens)
for toks in tokens:
            for idx, t in enumerate(toks):
not None:
result['tenant_id'] = self.tenant_id
if self.tenant_name is not None:
result['tenant_name'] = self.tenant_name
if self.status is not None:
result['status'] = self.status
if self.commodity_code is not None:
result['commodity_code'] = self.commodity_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('tenant_id') is not None:
self.tenant_id = m.get('tenant_id')
if m.get('tenant_name') is not None:
self.tenant_name = m.get('tenant_name')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('commodity_code') is not None:
self.commodity_code = m.get('commodity_code')
return self
class QueryInstanceCapacityResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
instance_capacitys: List[InstanceCapacity] = None,
):
        # Unique request ID, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; OK generally indicates a successful call
        self.result_code = result_code
        # Text description of the error, if any
        self.result_msg = result_msg
        # List of instance capacities
        self.instance_capacitys = instance_capacitys
def validate(self):
if self.instance_capacitys:
for k in self.instance_capacitys:
if k:
k.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
result['instance_capacitys'] = []
if self.instance_capacitys is not None:
for k in self.instance_capacitys:
result['instance_capacitys'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
self.instance_capacitys = []
if m.get('instance_capacitys') is not None:
for k in m.get('instance_capacitys'):
temp_model = InstanceCapacity()
self.instance_capacitys.append(temp_model.from_map(k))
return self
class QueryMarketingCouponRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
tenant_id: str = None,
product_codes: List[str] = None,
coupon_type: str = None,
biz_time: str = None,
):
        # Authorization token in OAuth mode
        self.auth_token = auth_token
        # Tenant ID
        self.tenant_id = tenant_id
        # Product codes
        self.product_codes = product_codes
        # Coupon type: VOUCHER (credit voucher), CERTAIN (fixed-amount discount), DISCOUNT (percentage discount)
        self.coupon_type = coupon_type
        # Time the business event occurred
        self.biz_time = biz_time
def validate(self):
self.validate_required(self.tenant_id, 'tenant_id')
self.validate_required(self.product_codes, 'product_codes')
self.validate_required(self.biz_time, 'biz_time')
if self.biz_time is not None:
self.validate_pattern(self.biz_time, 'biz_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')
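        # The pattern accepts ISO-8601-style timestamps, e.g. the (made-up)
        # values '2021-03-07T10:15:30Z' or '2021-03-07T10:15:30.123+08:00'.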
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.tenant_id is not None:
result['tenant_id'] = self.tenant_id
if self.product_codes is not None:
result['product_codes'] = self.product_codes
if self.coupon_type is not None:
result['coupon_type'] = self.coupon_type
if self.biz_time is not None:
result['biz_time'] = self.biz_time
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('tenant_id') is not None:
self.tenant_id = m.get('tenant_id')
if m.get('product_codes') is not None:
self.product_codes = m.get('product_codes')
if m.get('coupon_type') is not None:
self.coupon_type = m.get('coupon_type')
if m.get('biz_time') is not None:
self.biz_time = m.get('biz_time')
return self
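# --- Illustrative usage sketch (not part of the generated SDK; all values are hypothetical) ---
# The generated models move between objects and plain dicts via to_map()/from_map(),
# with validate() enforcing the required fields and the biz_time pattern.
def _query_marketing_coupon_request_example():
    req = QueryMarketingCouponRequest(
        tenant_id='TENANT123',
        product_codes=['PRODUCT_A'],
        coupon_type='VOUCHER',
        biz_time='2020-01-01T00:00:00Z',
    )
    req.validate()                    # raises if a required field is missing or biz_time is malformed
    payload = req.to_map()            # plain dict, ready to be serialized into a request body
    return QueryMarketingCouponRequest().from_map(payload)   # round-trips back into a model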
class QueryMarketingCouponResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
coupon_list: List[Coupon] = None,
):
# Unique request ID, used for call tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally indicates a successful call
self.result_code = result_code
# Text description of the error message
self.result_msg = result_msg
# List of coupons
self.coupon_list = coupon_list
def validate(self):
if self.coupon_list:
for k in self.coupon_list:
if k:
k.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
result['coupon_list'] = []
if self.coupon_list is not None:
for k in self.coupon_list:
result['coupon_list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
self.coupon_list = []
if m.get('coupon_list') is not None:
for k in m.get('coupon_list'):
temp_model = Coupon()
self.coupon_list.append(temp_model.from_map(k))
return self
class SendMarketingCouponRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
biz_no: str = None,
tenant_id: str = None,
template_id: str = None,
):
# Authorization token under the OAuth model
self.auth_token = auth_token
# External business serial number, used for idempotency checks
self.biz_no = biz_no
# Tenant ID
self.tenant_id = tenant_id
# Coupon template ID, obtained by applying in the marketing console
self.template_id = template_id
def validate(self):
self.validate_required(self.biz_no, 'biz_no')
self.validate_required(self.tenant_id, 'tenant_id')
self.validate_required(self.template_id, 'template_id')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.biz_no is not None:
result['biz_no'] = self.biz_no
if self.tenant_id is not None:
result['tenant_id'] = self.tenant_id
if self.template_id is not None:
result['template_id'] = self.template_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('biz_no') is not None:
self.biz_no = m.get('biz_no')
if m.get('tenant_id') is not None:
self.tenant_id = m.get('tenant_id')
if m.get('template_id') is not None:
self.template_id = m.get('template_id')
return self
class SendMarketingCouponResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
coupon_id: int = None,
):
# Unique request ID, used for call tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally indicates a successful call
self.result_code = result_code
# Text description of the error message
self.result_msg = result_msg
# Coupon ID
self.coupon_id = coupon_id
def validate(self):
pass
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.coupon_id is not None:
result['coupon_id'] = self.coupon_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('coupon_id') is not None:
self.coupon_id = m.get('coupon_id')
return self
class CreateOrderAfterRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_list: List[str] = None,
tenant_id: str = None,
biz_no: str = None,
instance_labels: List[InstanceLabel] = None,
):
# Authorization token under the OAuth model
self.auth_token = auth_token
# List of products to be provisioned
self.product_list = product_list
# Tenant ID
self.tenant_id = tenant_id
# Order transaction serial number, unique
self.biz_no = biz_no
# Label objects
self.instance_labels = instance_labels
def validate(self):
self.validate_required(self.product_list, 'product_list')
self.validate_required(self.tenant_id, 'tenant_id')
self.validate_required(self.biz_no, 'biz_no')
if self.instance_labels:
for k in self.instance_labels:
if k:
k.validate()
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_list is not None:
result['product_list'] = self.product_list
if self.tenant_id is not None:
result['tenant_id'] = self.tenant_id
if self.biz_no is not None:
result['biz_no'] = self.biz_no
result['instance_labels'] = []
if self.instance_labels is not None:
for k in self.instance_labels:
result['instance_labels'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_list') is not None:
self.product_list = m.get('product_list')
if m.get('tenant_id') is not None:
self.tenant_id = m.get('tenant_id')
if m.get('biz_no') is not None:
self.biz_no = m.get('biz_no')
self.instance_labels = []
if m.get('instance_labels') is not None:
for k in m.get('instance_labels'):
temp_model = InstanceLabel()
self.instance_labels.append(temp_model.from_map(k))
return self
class CreateOrderAfterResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
biz_no: str = None,
create_order_response_list: List[CreateOrderResult] = None,
):
# Unique request ID, used for call tracing and troubleshooting
self.req_msg_id = req_msg_id
# Result code; OK generally indicates a successful call
self.result_code = result_code
# Text description of the error message
self.result_msg = result_msg
# Business serial number
self.biz_no = biz_no
# Order creation results, containing key fields such as the instance ID and the secondary order number.
self.create_order_response_list = create_order_response_list
def validate(self):
if self.create_order_response_list:
for k in self.create_order_response_list:
if k:
k.validate()
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
if self.biz_no is not None:
result['biz_no'] = self.biz_no
result['create_order_response_list'] = []
if self.create_order_response_list is not None:
for k in self.create_order_response_list:
result['create_order_response_list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
if m.get('biz_no') is not None:
self.biz_no = m.get('biz_no')
self.create_order_response_list = []
if m.get('create_order_response_list') is not None:
for k in m.get('create_order_response_list'):
temp_model = CreateOrderResult()
self.create_order_response_list.append(temp_model.from_map(k))
return self
class CreateOrderWorkflowRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_list: List[str] = None,
tenant_id: str = None,
biz_no: str = None,
instance_labels: List[InstanceLabel] = None,
):
# Authorization token under the OAuth model
self.auth_token = auth_token
# List of products to be provisioned
self.product_list = product_list
# Tenant ID
self.tenant_id = tenant_id
# Order transaction serial number, unique
self.biz_no = biz_no
# Label objects
self.instance_labels = instance_labels
def validate(self):
self.validate_required(self.product_list, 'product_list')
self.validate_required(self.tenant_id, 'tenant_id')
self.validate_required(self.biz_no, 'biz_no')
self.validate_required(self.instance_labels, 'instance_labels')
if self.instance_labels:
for k in self.instance_labels:
if k:
k.validate()
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_list is not None:
result['product_list'] = self.product_list
#######Supplement for "Interpretable classifiers using rules and Bayesian analysis: Building a better stroke prediction model."
###LICENSE
#
#This software is released under the MIT license.
#
#Copyright (c) 2013-14 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
#The author/copyright holder can be contacted at <EMAIL>
####README
#
#This code implements the Bayesian Rule Lists algorithm as described in the
#paper. We include the Titanic dataset in the correct formatting to be used
#by this code.
#
#This code requires the external frequent itemset mining package "PyFIM,"
#available at http://www.borgelt.net/pyfim.html
#
#It is specific to binary classification with binary features (although could
#easily be extended to multiclass).
#
# ##INPUT
#
#The main code to be run by the user is the function "topscript". In addition to
#the parameters described in the paper, in this function you must specify a
#variable "fname" that will specify which data to load.
#The following files must be present and must be formatted as described (see
#titanic data included for an example):
#
# - fname_train.tab : This is the file containing the training X data, for which
#all features are binary. Each line is a data entry in which all of the features
#with value "1" are simply listed, with spaces delimiting different features. For
#example, in the Titanic dataset, "1st_class adult male" is a line in
#titanic.tab.
#
# - fname_train.Y : This file contains the training Y data (labels), and
#contains a line corresponding to each line in fname.tab. Each of the possible
#labels (two for binary classification) is listed (space delimited) and "1" is
#put for the label of that point, and 0 for others. For instance, a data entry
#with label "0" would have "1 0" in its line in fname.Y, and a data entry with
#label "1" would have "0 1" in its line in fname.
#
# - fname_test.tab and fname_test.Y, formatted with the same formatting as the
#training data.
#
# ##OUTPUT
#
#The highest level function, "topscript," returns:
#
# - permsdic - Contains the important information from the MCMC sampling. A
#dictionary whose keys are a string Pickle-dump of the antecedent list d, and
#whose values are a list [a,b] where a is (proportional to) the log posterior of
#d, and b is the number of times d is present in the MCMC samples.
# - d_star - the BRL-point antecedent list. A list of indices corresponding to
#variable "itemsets."
# - itemsets - A list of itemsets. itemsets[d_star[i]] is the antecedent in
#position i on the BRL-point list
# - theta - A list of the expected value of the posterior consequent
#distribution for each entry in BRL-point.
# - ci_theta - A list of tuples, each the 95% credible interval for the
#corresponding theta.
# - preds_d_star - Predictions on the demo data made using d_star and theta.
# - accur_d_star - The accuracy of the BRL-point predictions, with the decision
#boundary at 0.5.
# - preds_fullpost - Predictions on the demo data using the full posterior
#(BRL-post)
# - accur_fullpost - The accuracy of the BRL-post predictions, decision boundary
#at 0.5.
#
# ##CODE:
from numpy import *
import os,time,json,traceback,sys
from scipy.special import gammaln
from scipy.stats import poisson,beta
import pickle as Pickle
from collections import defaultdict,Counter
from fim import fpgrowth #this is PyFIM, available from http://www.borgelt.net/pyfim.html
try:
from matplotlib import pyplot as plt
except:
pass
###############BRL
#For producing the defaultdict used for storing MCMC results
def default_permsdic():
return [0.,0.]
#Resets the number of MCMC samples stored (value[1]) while maintaining the log-posterior value (so it doesn't need to be re-computed in future chains).
def reset_permsdic(permsdic):
for perm in permsdic:
permsdic[perm][1] = 0.
return permsdic
#Run mcmc for each of the chains, IN SERIAL!
def run_bdl_multichain_serial(numiters,thinning,alpha,lbda,eta,X,Y,nruleslen,lhs_len,maxlhs,permsdic,burnin,nchains,d_inits,verbose=True, seed=42):
# random seed
random.seed(seed)
#Run each chain
t1 = time.process_time()  # time.clock() was removed in Python 3.8; process_time() keeps the CPU-time semantics
if verbose:
print('Starting mcmc chains')
res = {}
for n in range(nchains):
res[n] = mcmcchain(numiters,thinning,alpha,lbda,eta,X,Y,nruleslen,lhs_len,maxlhs,permsdic,burnin,nchains,d_inits[n])
if verbose:
print('Elapsed CPU time',time.process_time()-t1)
#Check convergence
Rhat = gelmanrubin(res)
if verbose:
print('Rhat for convergence:',Rhat)
##plot?
#plot_chains(res)
return res,Rhat
def mcmcchain(numiters,thinning,alpha,lbda,eta,X,Y,nruleslen,lhs_len,maxlhs,permsdic,burnin,nchains,d_init):
res = {}
permsdic,res['perms'] = bayesdl_mcmc(numiters,thinning,alpha,lbda,eta,X,Y,nruleslen,lhs_len,maxlhs,permsdic,burnin,None,d_init)
#Store the permsdic results
res['permsdic'] = {perm:list(vals) for perm,vals in permsdic.items() if vals[1]>0}
#Reset the permsdic
permsdic = reset_permsdic(permsdic)
return res
#Check convergence with GR diagnostic
def gelmanrubin(res):
n = 0 #number of samples per chain - to be computed
m = len(res) #number of chains
phi_bar_j = {}
for chain in res:
phi_bar_j[chain] = 0.
for val in res[chain]['permsdic'].values():
phi_bar_j[chain] += val[1]*val[0] #numsamples*log posterior
n += val[1]
#And normalize
n = n//m #Number of samples per chain (assuming all m chains have same number of samples)
#Normalize, and compute phi_bar
phi_bar = 0.
for chain in phi_bar_j:
phi_bar_j[chain] = phi_bar_j[chain]/float(n) #normalize
phi_bar += phi_bar_j[chain]
phi_bar = phi_bar/float(m) #phi_bar = average of phi_bar_j
#Now B
B = 0.
for chain in phi_bar_j:
B += (phi_bar_j[chain] - phi_bar)**2
B = B*(n/float(m-1))
#Now W.
W = 0.
for chain in res:
s2_j = 0.
for val in res[chain]['permsdic'].values():
s2_j += val[1]*(val[0] -phi_bar_j[chain])**2
s2_j = (1./float(n-1))*s2_j
W += s2_j
W = W*(1./float(m))
#Next varhat
varhat = ((n-1)/float(n))*W + (1./float(n))*B
#And finally,
try:
Rhat = sqrt(varhat/float(W))
except:
print('RuntimeWarning computing Rhat, W='+str(W)+', B='+str(B))
Rhat = 0.
return Rhat
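# --- Illustrative sketch (not part of the original BRL code) ---
# The same Gelman-Rubin R-hat formula as above, written against plain per-chain
# sample arrays; assumes every chain holds the same number of samples n.
def _gelman_rubin_example(chains):
    m = len(chains)                                               # number of chains
    n = len(chains[0])                                            # samples per chain
    chain_means = array([mean(c) for c in chains])
    grand_mean = mean(chain_means)
    B = n * sum((chain_means - grand_mean) ** 2) / float(m - 1)   # between-chain variance
    W = mean([var(c, ddof=1) for c in chains])                    # within-chain variance
    varhat = (n - 1) / float(n) * W + B / float(n)                # pooled posterior variance estimate
    return sqrt(varhat / W)                                       # R-hat ~ 1 when the chains have converged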
#Plot the logposterior values for the samples in the chains.
def plot_chains(res):
for chain in res:
plt.plot([res[chain]['permsdic'][a][0] for a in res[chain]['perms']])
plt.show()
return
#Merge chains into a single collection of posterior samples
def merge_chains(res):
permsdic = defaultdict(default_permsdic)
for n in res:
for perm,vals in res[n]['permsdic'].items():
permsdic[perm][0] = vals[0]
permsdic[perm][1] += vals[1]
return permsdic
#Get a point estimate with length and width similar to the posterior average, with highest likelihood
def get_point_estimate(permsdic,lhs_len,X,Y,alpha,nruleslen,maxlhs,lbda,eta,verbose=True):
#Figure out the posterior expected list length and average rule size
listlens = []
rulesizes = []
for perm in permsdic:
# with open(perm, 'rb') as file:
# d_t = pickle.loads(file)
# print('perm', perm, type(perm))
# print('perm list', list(perm))
# d_t = Pickle.loads(bytes(perm, encoding="latin1")) #, encoding='bytes')
d_t = Pickle.loads(perm) #, encoding='bytes')
listlens.extend([len(d_t)] * int(permsdic[perm][1]))
rulesizes.extend([lhs_len[j] for j in d_t[:-1]] * int(permsdic[perm][1]))
#Now compute average
avglistlen = average(listlens)
if verbose:
print('Posterior average length:',avglistlen)
try:
avgrulesize = average(rulesizes)
if verbose:
print('Posterior average width:',avgrulesize)
#Prepare the intervals
minlen = int(floor(avglistlen))
maxlen = int(ceil(avglistlen))
minrulesize = int(floor(avgrulesize))
maxrulesize = int(ceil(avgrulesize))
#Run through all perms again
likelihds = []
d_ts = []
beta_Z,logalpha_pmf,logbeta_pmf = prior_calculations(lbda,len(X),eta,maxlhs) #get the constants needed to compute the prior
for perm in permsdic:
if permsdic[perm][1]>0:
d_t = Pickle.loads(perm) #this is the antecedent list
#Check the list length
if len(d_t) >= minlen and len(d_t) <= maxlen:
#Check the rule size
rulesize = average([lhs_len[j] for j in d_t[:-1]])
if rulesize >= minrulesize and rulesize <= maxrulesize:
d_ts.append(d_t)
#Compute the likelihood
R_t = d_t.index(0)
N_t = compute_rule_usage(d_t,R_t,X,Y)
likelihds.append(fn_logposterior(d_t,R_t,N_t,alpha,logalpha_pmf,logbeta_pmf,maxlhs,beta_Z,nruleslen,lhs_len))
likelihds = array(likelihds)
d_star = d_ts[likelihds.argmax()]
except RuntimeWarning:
#This can happen if all perms are identically [0], or if no soln is found within the len and width bounds (probably the chains didn't converge)
print('No suitable point estimate found')
d_star = None
return d_star
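# --- Illustrative sketch (not part of the original code) ---
# How a BRL point estimate is read: d_star is an ordered list of antecedent
# indices ending with 0 (the default rule), and theta[i] is the fitted P(y=1)
# for position i. The first rule whose antecedent is contained in x decides.
def _predict_with_point_estimate_example(X, d_star, itemsets, theta):
    preds = []
    for x in X:                                       # x: the binary features present in one data point
        for i, j in enumerate(d_star):
            if j == 0 or set(itemsets[j]).issubset(set(x)):
                preds.append(theta[i])                # first matching rule (or the default) fires
                break
    return preds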
#################COMPUTING RESULTS
#Compute the posterior consequent distributions
def get_rule_rhs(Xtrain,Ytrain,d_t,alpha,intervals):
N_t = compute_rule_usage(d_t,d_t.index(0),Xtrain,Ytrain)
theta = []
ci_theta = []
for i,j in enumerate(d_t):
#theta ~ Dirichlet(N[j,:] + alpha)
#E[theta] = (N[j,:] + alpha)/float(sum(N[j,:] + alpha))
#NOTE this result is only for binary classification
#theta = p(y=1)
theta.append((N_t[i,1] + alpha[1])/float(sum(N_t[i,:] + alpha)))
#And now the 95% interval, for Beta(N[j,1] + alpha[1], N[j,0] + alpha[0])
# Source repository: barry-scott/PythonWinAppPackager
#!/usr/bin/python3
#
# win_app_package_exe_config.py
#
import sys
import ctypes
import ctypes.wintypes
import struct
from namedstruct import namedstruct
is_64bit = (ctypes.sizeof( ctypes.c_voidp ) * 8) == 64
# DumpRange used for debug
struct_DumpRange = namedstruct( 'DumpRange', '<'
'8h:row1 '
'8h:row2 '
'8h:row3 '
'8h:row4 '
)
# format of .ICO file on disk
struct_IconDirHeader = namedstruct( 'IconDirHeader', '<'
'h:idReserved '
'h:idType '
'h:idCount'
)
struct_IconDirEntry = namedstruct( 'IconDirEntry', '<'
'b:bWidth '
'b:bHeight '
'b:bColorCount '
'b:bReserved '
'h:wPlanes '
'h:wBitCount '
'i:dwBytesInRes '
'i:dwImageOffset'
)
# Resource formats
struct_GrpIconDir = namedstruct( 'GrpIconDir', '<'
'h:idReserved '
'h:idType '
'h:idCount'
)
struct_GrpIconDirEntry = namedstruct( 'GrpIconDirEntry', '<'
'b:bWidth '
'b:bHeight '
'b:bColorCount '
'b:bReserved '
'h:wPlanes '
'h:wBitCount '
'i:dwBytesInRes '
'h:nID'
)
# VS_VERSIONINFO
struct_VersionInfoHeader = namedstruct( 'VersionInfo', '<'
'h:wLength ' # sizeof( struct_VersionInfoHeader ) + sizeof( struct_VersionInfoFixedFileInfo )
'h:wValueLength '
'h:wType '
'16h:szKey ' # room for "VS_VERSION_INFO\0" in utf-16
'h:Padding1 '
)
# VS_FIXEDFILEINFO
struct_VersionInfoFixedFileInfo = namedstruct( 'VersionInfoFixedFileInfo', '<'
'I:dwSignature ' # magic value 0xFEEF04BD
'h:dwStrucVersion1 ' # ???
'h:dwStrucVersion0 ' # ???
'h:dwFileVersion1 '
'h:dwFileVersion0 '
'h:dwFileVersion3 '
'h:dwFileVersion2 '
'h:dwProductVersion1 '
'h:dwProductVersion0 '
'h:dwProductVersion3 '
'h:dwProductVersion2 '
'i:dwFileFlagsMask '
'i:dwFileFlags '
'i:dwFileOS '
'i:dwFileType '
'i:dwFileSubtype '
'i:dwFileDateMS '
'i:dwFileDateLS '
)
# dwSignature value
VER_SIGNATURE_MAGIC = 0xFEEF04BD
# dwFileOS value
VOS_NT_WINDOWS32 = 0x00040004
# dwFileType
VFT_APP = 0x00000001
# dwFileSubtype
VFT2_UNKNOWN = 0x00000000
# Child of VS_VERSION_INFO Header
struct_ChildOfVersionInfoHeader = namedstruct( 'ChildOfVersionInfoHeader', '<'
'h:wLength '
'h:wValueLength '
'h:wType '
)
# StringFileInfo
struct_StringFileInfo = namedstruct( 'StringFileInfo', '<'
'h:wLength '
'h:wValueLength '
'h:wType '
'15h:szKey ' # "StringFileInfo".
)
# VarFileInfo
struct_VarFileInfo = namedstruct( 'VarFileInfo', '<'
'h:wLength '
'h:wValueLength '
'h:wType '
'12h:szKey ' # "VarFileInfo".
'h:padding'
)
struct_Var = namedstruct( 'Var', '<'
'h:wLength '
'h:wValueLength '
'h:wType '
'11h:szKey ' # "Translation".
'h:Padding '
# list of VarValue
)
struct_VarValue = namedstruct( 'VarValue', '<'
'I:value '
)
# StringTable
struct_StringTable = namedstruct( 'StringTable', '<'
'h:wLength '
'h:wValueLength '
'h:wType '
'15h:szKey '
'h:Padding '
# n of String
)
# String
struct_StringHeader = namedstruct( 'StringHeader', '<'
'h:wLength '
'h:wValueLength '
'h:wType '
# 0 terminated wchar string
# pad to 32 bit
# 0 terminated wchar string
)
# ctypes functions
BeginUpdateResource = ctypes.windll.kernel32.BeginUpdateResourceW
BeginUpdateResource.argtypes = (ctypes.wintypes.LPCWSTR
,ctypes.wintypes.BOOL)
BeginUpdateResource.restype = ctypes.wintypes.HANDLE
UpdateResource = ctypes.windll.kernel32.UpdateResourceW
if is_64bit:
UpdateResource.argtypes = (ctypes.wintypes.HANDLE
,ctypes.wintypes.ULARGE_INTEGER
,ctypes.wintypes.ULARGE_INTEGER
,ctypes.wintypes.WORD
,ctypes.wintypes.LPVOID
,ctypes.wintypes.DWORD)
else:
UpdateResource.argtypes = (ctypes.wintypes.HANDLE
,ctypes.wintypes.DWORD
,ctypes.wintypes.DWORD
,ctypes.wintypes.WORD
,ctypes.wintypes.LPVOID
,ctypes.wintypes.DWORD)
EndUpdateResource = ctypes.windll.kernel32.EndUpdateResourceW
EndUpdateResource.argtypes = (ctypes.wintypes.HANDLE
,ctypes.wintypes.BOOL)
LoadLibraryEx = ctypes.windll.kernel32.LoadLibraryExW
LoadLibraryEx.argtypes = (ctypes.wintypes.LPCWSTR
,ctypes.wintypes.HANDLE
,ctypes.wintypes.DWORD)
LoadLibraryEx.restype = ctypes.wintypes.HANDLE
LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE = 0x00000040
FreeLibrary = ctypes.windll.kernel32.FreeLibrary
FreeLibrary.argtypes = (ctypes.wintypes.HANDLE,)
FindResource = ctypes.windll.kernel32.FindResourceW
FindResource.argtypes = (ctypes.wintypes.HANDLE
,ctypes.wintypes.ULARGE_INTEGER
,ctypes.wintypes.ULARGE_INTEGER)
FindResource.restype = ctypes.wintypes.HANDLE
LoadResource = ctypes.windll.kernel32.LoadResource
LoadResource.argtypes = (ctypes.wintypes.HANDLE
,ctypes.wintypes.HANDLE)
LoadResource.restype = ctypes.wintypes.HANDLE
SizeofResource = ctypes.windll.kernel32.SizeofResource
SizeofResource.argtypes = (ctypes.wintypes.HANDLE
,ctypes.wintypes.HANDLE)
SizeofResource.restype = ctypes.wintypes.DWORD
LockResource = ctypes.windll.kernel32.LockResource
LockResource.argtypes = (ctypes.wintypes.HANDLE,)
LockResource.restype = ctypes.c_void_p
RT_ICON = 3
RT_GROUP_ICON = 14
RT_VERSION = 16
RT_STRING = 6 # STRINGTABLE
STRINGTABLE_BUNDLE_SIZE = 16
resource_ids = {
'IDS_PYTHON_DLL': 64*STRINGTABLE_BUNDLE_SIZE + 0,
'IDS_MAIN_PY_MODULE': 64*STRINGTABLE_BUNDLE_SIZE + 1,
'IDS_INSTALL_FOLDER_KEY': 64*STRINGTABLE_BUNDLE_SIZE + 2,
'IDS_INSTALL_FOLDER_VALUE': 64*STRINGTABLE_BUNDLE_SIZE + 3,
'IDS_PY_VERBOSE': 65*STRINGTABLE_BUNDLE_SIZE + 0,
}
# called when used standalone
def main( argv ):
if argv[1:2] == ['bootstrap']:
exe_filename = argv[2]
python_dll = argv[3]
main_py_module = argv[4]
install_key = argv[5]
install_value = argv[6]
return configureAppExeBootStrap( exe_filename, python_dll, main_py_module, install_key, install_value )
elif argv[1:2] == ['flags']:
exe_filename = argv[2]
py_verbose = argv[3]
return configureAppExePyFlags( exe_filename, py_verbose )
elif argv[1:2] == ['create']:
return createResourceIdHeaderFile( argv[2] )
elif argv[1:2] == ['icon']:
return updateIconInExe( argv[3], argv[2] )
elif argv[1:2] == ['show-version-info']:
return showVersionInfoInExe( argv[2] )
elif argv[1:2] == ['set-version-info']:
return setVersionInfoInExe( argv[2], argv[3:] )
else:
print( 'Usage: %s bootstrap <exefile> <python_dll> <main_py_module> <install_key> <install_value>' % (argv[0],) )
print( ' %s flags <exefile> <verbose>' % (argv[0],) )
print( ' %s create <header-filename>' % (argv[0],) )
print( ' %s show-version-info <exefile>' % (argv[0],) )
return 1
# called when part of win_app_packager
def flagsCommand( argv ):
if len(argv) != 4:
return usage()
exe_filename = argv[2]
py_verbose = argv[3]
return configureAppExePyFlags( exe_filename, py_verbose )
# called when part of win_app_packager
def usage():
################################################################################
print(
'''python3 -m win_app_packager flags <exe-file> <verbose>
exe-file
- the EXE file created by win_app_packager that should be modified
verbose
- either "0" or "1". The value to set the Python verbose flag to.
''' )
return 1
def createResourceIdHeaderFile( h_file ):
with open( h_file, 'w' ) as f:
for name in resource_ids:
f.write( '#define %s %d\n' % (name, resource_ids[ name ]) )
return 0
def configureAppExeBootStrap( exe_filename, python_dll, main_py_module, install_key, install_value ):
all_strings = [
python_dll,
main_py_module,
install_key,
install_value,
]
return updateStringBundleInExe( exe_filename, 'IDS_INSTALL_FOLDER_KEY', all_strings )
def configureAppExePyFlags( exe_filename, py_verbose ):
all_strings = [
py_verbose,
]
return updateStringBundleInExe( exe_filename, 'IDS_PY_VERBOSE', all_strings )
def updateStringBundleInExe( exe_filename, stringtable_id_name, all_strings ):
stringtable_id = resource_ids[ stringtable_id_name ]
while len(all_strings) != STRINGTABLE_BUNDLE_SIZE:
all_strings.append( '' )
all_strtab_data = []
for s in all_strings:
count = struct.pack( '<H', len( s ) )
all_strtab_data.append( count )
if len(s) > 0:
data = s.encode( 'utf-16' )[2:]
all_strtab_data.append( data )
strtab_data = b''.join( all_strtab_data )
language = 0
strtab_id = stringtable_id//STRINGTABLE_BUNDLE_SIZE + 1
h = BeginUpdateResource( exe_filename, False )
#print( h, ctypes.FormatError() )
rc = UpdateResource( h, RT_STRING, strtab_id, language, strtab_data, len(strtab_data) )
#print( rc, ctypes.FormatError() )
rc = EndUpdateResource( h, False )
#print( rc, ctypes.FormatError() )
return 0
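# --- Illustrative sketch (not part of the original module) ---
# Byte layout produced above: a STRINGTABLE resource bundle is 16 counted
# UTF-16LE strings back to back - a 16-bit character count followed by that
# many code units, with no BOM and no terminator; empty slots are a zero count.
def _pack_stringtable_example():
    strings = ['python39.dll', 'app_main'] + [''] * 14    # hypothetical bundle contents
    data = b''
    for s in strings:
        data += struct.pack('<H', len(s))                 # character count
        data += s.encode('utf-16-le')                     # payload; equivalent to encode('utf-16')[2:] above
    return data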
def updateIconInExe( exe_filename, icon_filename ):
with open( icon_filename, 'rb' ) as f:
all_entries = []
all_images = []
header = struct_IconDirHeader.unpack( f.read( len(struct_IconDirHeader) ) )
for i in range( header.idCount ):
all_entries.append(
struct_IconDirEntry.unpack(
f.read( len(struct_IconDirEntry) ) ) )
for entry in all_entries:
f.seek( entry.dwImageOffset, 0 )
all_images.append( f.read( entry.dwBytesInRes ) )
h = BeginUpdateResource( exe_filename, False )
#print( h, ctypes.FormatError() )
grp_header = struct_GrpIconDir.packer()
grp_header.idReserved = 0
grp_header.idType = header.idType
grp_header.idCount = header.idCount
all_data = [grp_header.pack()]
entry_id = 1
for entry in all_entries:
grp_entry = struct_GrpIconDirEntry.packer()
grp_entry.bWidth = entry.bWidth
grp_entry.bHeight = entry.bHeight
grp_entry.bColorCount = entry.bColorCount
grp_entry.bReserved = entry.bReserved
grp_entry.wPlanes = entry.wPlanes
grp_entry.wBitCount = entry.wBitCount
grp_entry.dwBytesInRes = entry.dwBytesInRes
grp_entry.nID = entry_id
all_data.append( grp_entry.pack() )
entry_id += 1
language = 0
data = b''.join( all_data )
rc = UpdateResource( h, RT_GROUP_ICON, 1, language, data, len(data) )
#print( rc, ctypes.FormatError() )
entry_id = 1
for image in all_images:
rc = UpdateResource( h, RT_ICON, entry_id, language, image, len(image) )
#print( rc, ctypes.FormatError() )
entry_id += 1
rc = EndUpdateResource( h, False )
#print( rc, ctypes.FormatError() )
return 0
def dprint( msg ):
#print( '|', msg )
pass
def showVersionInfoInExe( exe_filename ):
vir = VersionInfoResource( exe_filename )
print( ' dwStrucVersion: %d.%d' % (vir.fixed.dwStrucVersion0, vir.fixed.dwStrucVersion1) )
print( ' FileVersion: %d.%d.%d.%d' % (vir.fixed.dwFileVersion0, vir.fixed.dwFileVersion1, vir.fixed.dwFileVersion2, vir.fixed.dwFileVersion3) )
print( ' ProductVersion: %d.%d.%d.%d' % (vir.fixed.dwProductVersion0, vir.fixed.dwProductVersion1, vir.fixed.dwProductVersion2, vir.fixed.dwProductVersion3) )
print( ' encoding: %s' % (vir.encoding,) )
for key in sorted( vir.all_properties ):
print( ' %18s: "%s"' % (key, vir.all_properties[ key ]) )
print( ' translation value: 0x%8.8x' % (vir.translation.value,) )
def setVersionInfoInExe( exe_filename, argv ):
vir = VersionInfoResource( exe_filename )
args = iter(argv)
for name in args:
value = next(args)
if name.lower() == 'version':
# setVersion() expects a 4-tuple of ints, so parse a dotted version string like "1.2.3.4"
vir.setVersion( tuple( int(part) for part in value.split('.') ) )
else:
vir.setProperty( name, value )
vir.updateWithChanges()
class VersionInfoResource:
def __init__( self, exe_filename ):
self.exe_filename = exe_filename
self.__loadVersionInfo()
self.pack_header = struct_VersionInfoHeader.packer( self.header )
self.pack_fixed = struct_VersionInfoFixedFileInfo.packer( self.fixed )
self.pack_child = struct_ChildOfVersionInfoHeader.packer( self.child )
self.pack_string_file_info = struct_StringFileInfo.packer( self.string_file_info )
def setProperty( self, name, value ):
self.all_properties[ name ] = value
def setVersion( self, int_version ):
assert len(int_version) == 4, 'bad version %r' % (int_version,)
str_version = '%d.%d.%d.%d' % int_version
self.all_properties[ 'ProductVersion' ] = str_version
self.pack_fixed.dwProductVersion0 = int_version[0]
self.pack_fixed.dwProductVersion1 = int_version[1]
self.pack_fixed.dwProductVersion2 = int_version[2]
self.pack_fixed.dwProductVersion3 = int_version[3]
self.all_properties[ 'FileVersion' ] = str_version
self.pack_fixed.dwFileVersion0 = int_version[0]
self.pack_fixed.dwFileVersion1 = int_version[1]
self.pack_fixed.dwFileVersion2 = int_version[2]
self.pack_fixed.dwFileVersion3 = int_version[3]
def updateWithChanges( self ):
version_resource = self.__encodeVersionInfo()
self.__decodeVersionInfo( version_resource )
h = BeginUpdateResource( self.exe_filename, False )
dprint( 'BeginUpdateResource() -> 0x%x - %s' % (h, ctypes.FormatError()) )
language = 0
version_id = 1
rc = UpdateResource( h, RT_VERSION, version_id, language, version_resource, len(version_resource) )
dprint( 'UpdateResource() -> 0x%x - %s' % (rc, ctypes.FormatError()) )
rc = EndUpdateResource( h, False )
dprint( 'EndUpdateResource() => 0x%x' % (rc,) )
return 0
def __encodeVersionInfo( self ):
# pack in reverse order so that sizes are available
# the tail is copied as is
# string properties block
all_string_properties = []
for name, value in self.all_properties.items():
sz_name = packSzChar( name )
if value == '':
sz_value = b''
else:
sz_value = packSzChar( value )
offset = len(struct_StringHeader) + len(sz_name)
dprint( 'zzz name %r value %r offset %d' % (name, value, offset) )
TREATMENT RECORD IOD': ['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# SecondaryCaptureDeviceManufacturerModelName
0x00181018L: {
'SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED PDF IOD': ['Equipment'],
None: ['Equipment'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Equipment'],
'ENCAPSULATED CDA IOD': ['Equipment'],
},
# ReferencedImageEvidenceSequence
0x00089092L: {
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# SegmentedGreenPaletteColorLookupTableData
0x00281222L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
None: ['Image', 'Color Palette', 'Presentation State'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'COLOR PALETTE IOD': ['Color Palette'],
},
# CoverageOfKSpace
0x00189094L: {
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# ImplantName
0x00221095L: {
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
None: ['Implant Template'],
},
# ReferringPhysicianIdentificationSequence
0x00080096L: {
'BASIC STRUCTURED DISPLAY IOD': ['Study'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Study'],
'RT BRACHY TREATMENT RECORD IOD': ['Study'],
'RT STRUCTURE SET IOD': ['Study'],
'RT PLAN IOD': ['Study'],
'CR IMAGE IOD': ['Study'],
'RAW DATA IOD': ['Study'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Study'],
'ENHANCED MR IMAGE IOD': ['Study'],
'BASIC CARDIAC EP IOD': ['Study'],
'RT TREATMENT SUMMARY RECORD IOD': ['Study'],
'12-LEAD ECG IOD': ['Study'],
'RESPIRATORY WAVEFORM IOD': ['Study'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Study'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Study'],
'BASIC VOICE AUDIO IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Study'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Study'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Study'],
'BASIC TEXT SR IOD': ['Study'],
'NM IMAGE IOD': ['Study'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'LENSOMETRY MEASUREMENTS IOD': ['Study'],
'MR SPECTROSCOPY IOD': ['Study'],
'ENCAPSULATED PDF IOD': ['Study'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CHEST CAD SR IOD': ['Study'],
'HEMODYNAMIC IOD': ['Study'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Study'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Study'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Study'],
'ENHANCED MR COLOR IMAGE IOD': ['Study'],
'ENHANCED CT IMAGE IOD': ['Study'],
'X-RAY RADIATION DOSE SR IOD': ['Study'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Study'],
'PROCEDURE LOG IOD': ['Study'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Study'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Study'],
'STEREOMETRIC RELATIONSHIP IOD': ['Study'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Study'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Study'],
'VL ENDOSCOPIC IMAGE IOD': ['Study'],
'KERATOMETRY MEASUREMENTS IOD': ['Study'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Study'],
'COMPREHENSIVE SR IOD': ['Study'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Study'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Study'],
'SPATIAL FIDUCIALS IOD': ['Study'],
'RT ION PLAN IOD': ['Study'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CT IMAGE IOD': ['Study'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Study'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Study'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Study'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'RT DOSE IOD': ['Study'],
'AMBULATORY ECG IOD': ['Study'],
'SURFACE SEGMENTATION IOD': ['Study'],
'MAMMOGRAPHY CAD SR IOD': ['Study'],
'VL MICROSCOPIC IMAGE IOD': ['Study'],
'RT BEAMS TREATMENT RECORD IOD': ['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# VolumetricProperties
0x00089206L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# TransmitterFrequency
0x00189098L: {
'MR SPECTROSCOPY IOD': ['Equipment'],
None: ['Equipment'],
},
# TableFeedPerRotation
0x00189310L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# BrachyTreatmentType
0x300A0202L: {
'RT PLAN IOD': ['Plan'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
None: ['Treatment Record', 'Plan'],
},
# ChannelDescriptionCodeSequence
0x0022001AL: {
None: ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
},
# RespiratoryMotionCompensationTechnique
0x00189170L: {
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Image'],
},
# DeviceSerialNumber
0x00181000L: {
'BASIC STRUCTURED DISPLAY IOD': ['Equipment'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Equipment'],
'RT BRACHY TREATMENT RECORD IOD': ['Equipment'],
'RT STRUCTURE SET IOD': ['Equipment'],
'RT PLAN IOD': ['Equipment'],
'CR IMAGE IOD': ['Equipment'],
'RAW DATA IOD': ['Equipment'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Equipment'],
'ENHANCED MR IMAGE IOD': ['Equipment'],
'BASIC CARDIAC EP IOD': ['Equipment'],
'RT TREATMENT SUMMARY RECORD IOD': ['Equipment'],
'12-LEAD ECG IOD': ['Equipment'],
'RESPIRATORY WAVEFORM IOD': ['Equipment'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Equipment'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Equipment'],
'BASIC VOICE AUDIO IOD': ['Equipment'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Equipment'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Equipment'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Equipment'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Equipment'],
'BASIC TEXT SR IOD': ['Equipment'],
'NM IMAGE IOD': ['Equipment'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Equipment'],
'LENSOMETRY MEASUREMENTS IOD': ['Equipment'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENCAPSULATED PDF IOD': ['Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Equipment'],
'CHEST CAD SR IOD': ['Equipment'],
'HEMODYNAMIC IOD': ['Equipment'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Equipment'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Equipment'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Equipment'],
'ENHANCED MR COLOR IMAGE IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Equipment'],
'X-RAY RADIATION DOSE SR IOD': ['Equipment'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Equipment'],
'PROCEDURE LOG IOD': ['Equipment'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Equipment'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Equipment'],
'STEREOMETRIC RELATIONSHIP IOD': ['Equipment'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Equipment'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Equipment'],
'VL ENDOSCOPIC IMAGE IOD': ['Equipment'],
'KERATOMETRY MEASUREMENTS IOD': ['Equipment'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Equipment'],
'COMPREHENSIVE SR IOD': ['Equipment'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Equipment'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Equipment'],
'SPATIAL FIDUCIALS IOD': ['Equipment'],
| |
import os
import System.Drawing.Color
import Rhino.Geometry as rg
import Rhino
import ghpythonlib.components as ghcomp
from collections import OrderedDict
from collections import defaultdict
import math
import subprocess
import alpaca4dUtil
#TODO
# USE SELF.TYPEELEMENT TO WRITE THE OBJECT TYPE
# GEOMTRANSFORMATION NEEDS TO BE IS OWN OBJECT
# BEAM INTEGRATION OWN OBJECT
class Model(object):
def __init__(self, ndm = 3, ndf = 6):
self.ndm = ndm
self.ndf = ndf
self.uniquePoints = []
self.uniquePointsThreeNDF = []
self.uniquePointsSixNDF = []
self.cloudPoint = None
self.cloudPointThreeNDF = None
self.cloudPointSixNDF = None
self.RTreeCloudPoint = None
self.RTreeCloudPointThreeNDF = None
self.RTreeCloudPointSixNDF = None
self.nodes = []
self.elements = []
self.supports = []
self.constraints = []
self.loads = []
self.materials = []
self.timeSeries = {}
self.beams = []
self.shells = []
self.bricks = []
self.recorder = []
self.recorderName = None
#self.pointLoad = defaultdict(list)
self.pointLoad = []
self.filename = None
self.threeNDFModel = []
self.sixNDFModel = []
self.analysis = None
self.isAnalysed = None
self.isModal = None
self.py = []
def getuniquePoints(self, beams, shells, bricks, constraints):
"""Cull Duplicates Points.
Keyword arguments:
beams -- List of beams
shells -- List of shells
return (List of uniquePoints, PointCloud)
"""
threeNDFPoints = []
sixNDFPoints = []
AllPoints = []
for beam in beams:
# need to add an if for truss elements
PointAtStart = beam.Crv.PointAtStart
PointAtEnd = beam.Crv.PointAtEnd
sixNDFPoints.append(PointAtStart)
sixNDFPoints.append(PointAtEnd)
for shell in shells:
vertices = shell.Mesh.Vertices.ToPoint3dArray()
for node in vertices:
sixNDFPoints.append(node)
for brick in bricks:
vertices = brick.Mesh.Vertices.ToPoint3dArray()
for node in vertices:
threeNDFPoints.append(node)
for constraint in constraints:
if constraint.type == "rigidDiaphragm":
sixNDFPoints.extend(constraint.slaveNodes)
sixNDFPoints.append(constraint.masterNode)
elif constraint.type == "equalDOF":
sixNDFPoints.append(constraint.slaveNodes)
sixNDFPoints.append(constraint.masterNode)
if not threeNDFPoints:
self.uniquePointsThreeNDF = []
else:
#self.uniquePointsThreeNDF = rg.Point3d.CullDuplicates(threeNDFPoints, 0.001)
self.uniquePointsThreeNDF = alpaca4dUtil.removeDuplicates(threeNDFPoints, 0.001)
if not sixNDFPoints:
self.uniquePointsSixNDF = []
else:
#self.uniquePointsSixNDF = rg.Point3d.CullDuplicates(sixNDFPoints, 0.001)
self.uniquePointsSixNDF = alpaca4dUtil.removeDuplicates(sixNDFPoints, 0.001)
#self.uniquePoints = self.uniquePointsThreeNDF + self.uniquePointsSixNDF
if not self.uniquePointsThreeNDF:
self.uniquePoints = self.uniquePointsSixNDF
elif not self.uniquePointsSixNDF:
self.uniquePoints = self.uniquePointsThreeNDF
else:
self.uniquePoints = self.uniquePointsThreeNDF + self.uniquePointsSixNDF
#self.cloudPoint = rg.PointCloud(self.uniquePoints)
#self.RTreeCloudPoint = rg.RTree.CreateFromPointArray(self.uniquePoints)
self.cloudPointThreeNDF = rg.PointCloud(self.uniquePointsThreeNDF)
self.RTreeCloudPointThreeNDF = rg.RTree.CreateFromPointArray(self.uniquePointsThreeNDF)
self.cloudPointSixNDF = rg.PointCloud(self.uniquePointsSixNDF)
self.RTreeCloudPointSixNDF = rg.RTree.CreateFromPointArray(self.uniquePointsSixNDF)
return
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return "<Class Model>"
#TODO
@staticmethod
def write3ndf_py():
return "ops.model('basic', '-ndm', 3, '-ndf', 3)\n"
@staticmethod
def write6ndf_py():
return "ops.model('basic', '-ndm', 3, '-ndf', 6)\n"
#TODO
@staticmethod
def write3ndf_tcl():
return "model BasicBuilder -ndm 3 -ndf 3\n"
@staticmethod
def write6ndf_tcl():
return "model BasicBuilder -ndm 3 -ndf 6\n"
def writeFile(self, filename):
textFile = self.py
with open(filename, 'w') as f:
for line in textFile:
f.write("%s\n" % line)
self.filename = filename
pass
def runOpensees(self, args):
if args == 'python':
executable = 'python.exe'
elif args == 'tcl':
executable = 'opensees.exe'
else:
executable = args
process = subprocess.Popen([executable, self.filename], shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return stdout, stderr
def runEigen(self, executable):
process = subprocess.Popen([executable, self.filename], shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = process.communicate()
delim = 'Italy.'
eigen = stderr.partition(delim)[2].strip().split('\r\n')
return stdout, eigen
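# --- Illustrative sketch (not part of the original file; assumes a Rhino/Grasshopper environment) ---
# Typical flow for the Model class: collect the unique nodes, write the generated
# script to disk, then hand it to an OpenSees interpreter.
def _model_workflow_example(beams, shells, bricks, constraints):
    model = Model(ndm=3, ndf=6)
    model.getuniquePoints(beams, shells, bricks, constraints)   # builds the point clouds and RTrees
    model.py = [Model.write6ndf_py()]                           # lines of the OpenSeesPy script (built up elsewhere)
    model.writeFile('model_example.py')                         # hypothetical output path
    return model.runOpensees('python')                          # (stdout, stderr) of the analysis run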
class Node(object):
def __init__(self, Pos):
self.Pos = Pos
self.nodeTag = None
self.ndf = None
self.mass = 0
self.displacement = None
self.rotation = None
def setNodeTag(self, model):
if self.ndf == 3:
self.nodeTag = model.cloudPointThreeNDF.ClosestPoint(self.Pos) + 1
elif self.ndf == 6:
self.nodeTag = model.cloudPointSixNDF.ClosestPoint(self.Pos) + 1 + len(model.uniquePointsThreeNDF)
pass
def setNodeTagRTree(self, RTreeCloudPoint):
closestIndices = []
#event handler of type RTreeEventArgs
def SearchCallback(sender, e):
closestIndices.append(e.Id)
RTreeCloudPoint.Search(rg.Sphere(self.Pos, 0.001), SearchCallback)
ind = closestIndices
self.nodeTag = ind[0]
pass
def setMassNode(self, MassElement):
self.mass = MassElement
pass
## override Rhino .ToString() method (display name of the class in Gh)
def ToString(self):
return self.write_tcl()
def write_tcl(self):
if self.ndf == 6:
tcl_text = "node {} {} {} {} -mass {} {} {} {} {} {}".format(self.nodeTag, float(self.Pos.X), float(self.Pos.Y), float(self.Pos.Z), self.mass, self.mass, self.mass, self.mass, self.mass, self.mass)
elif self.ndf == 3:
tcl_text = "node {} {} {} {} -mass {} {} {}".format(self.nodeTag, float(self.Pos.X), float(self.Pos.Y), float(self.Pos.Z), self.mass, self.mass, self.mass)
else:
raise ValueError('No ndf has been assigned')
return tcl_text
def write_py(self):
if self.ndf == 6:
py_text = "ops.node({}, {}, {}, {}, '-mass', {}, {}, {}, {}, {}, {})".format(self.nodeTag, float(self.Pos.X), float(self.Pos.Y), float(self.Pos.Z), self.mass, self.mass, self.mass, self.mass, self.mass, self.mass)
elif self.ndf == 3:
py_text = "ops.node({}, {}, {}, {}, '-mass', {}, {}, {})".format(self.nodeTag, float(self.Pos.X), float(self.Pos.Y), float(self.Pos.Z), self.mass, self.mass, self.mass)# ops.node(nodeTag, *crds, '-ndf', ndf, '-mass', *mass)
else:
raise ValueError('No ndf has been assigned')
return py_text
#####################################################
### Material ########################################
#####################################################
class uniAxialMaterialElastic(object):
def __init__(self, matName, E, Eneg, eta, G, v, rho):
"""Generate a uniaxial Elastic Material
Inputs:
matName: Name of the material.
E: Young's Modulus [MPa].
eta = damping tangent.
G: Tangential Modulus [MPa].
v: Poisson ratio.
rho: specific weight [kN/m3].
fy: Yield stress value of the material [MPa]"""
self.matName = matName
self.E = E
self.Eneg = Eneg
self.eta = eta
self.G = G
self.v = v
self.rho = rho
self.matTag = None
if self.eta == None:
self.eta = 0.0
if self.Eneg == None:
self.Eneg = E
self.materialDimension = "uniAxialMaterial"
self.materialType = "Elastic"
if self.v == None:
self.G = G # Input value in N/mm2 ---> Output kN/m2
self.v = (E / (2 * G)) - 1
else:
self.G = E / (2 * (1 + v))
def ToString(self):
return self.write_tcl()
def write_tcl(self):
return "uniaxialMaterial Elastic {} {} {} {}\n".format(self.matTag, self.E, self.eta, self.Eneg)
def write_py(self):
return "ops.uniaxialMaterial('Elastic', {}, {}, {}, {})\n".format(self.matTag, self.E, self.eta, self.Eneg)
class uniAxialMaterialConcreate01(object):
def __init__(self, matName, fc, ec0, fcu, ecu):
"""Generate a uniaxial Elastic Material Concreate01
link for information: https://opensees.berkeley.edu/wiki/index.php/Concrete01_Material_--_Zero_Tensile_Strength
Inputs:
matName: Name of the material.
fc: concrete compressive strength at 28 days (compression is negative) [MPa].
fcu: concrete crushing strength[MPa].
ec0: concrete strain at maximum strength.
ecu: concrete strain at crushing strength."""
self.matName = matName
self.fc = fc
self.fcu = fcu
self.ec0 = ec0
self.ecu = ecu
self.matTag = None
self.materialDimension = "uniAxialMaterial"
self.materialType = "Concrete01"
def ToString(self):
return self.write_tcl()
def write_tcl(self):
return "uniaxialMaterial Concrete01 {} {} {} {} {}\n".format(self.matTag, self.fc, self.ec0, self.fcu, self.ecu)
def write_py(self):
return "ops.uniaxialMaterial('Concrete01', {}, {}, {}, {}, {})\n".format(self.matTag, self.fc, self.ec0, self.fcu, self.ecu)
class uniAxialMaterialSteel01(object):
def __init__(self, matName, fy, E0, b, a1, a2, a3, a4):
"""Generate a uniaxial Elastic Material Steel01
link for information: https://opensees.berkeley.edu/wiki/index.php/Steel01_Material
Inputs:
matName: Name of the material.
fy: yield strength [MPa].
E0: initial elastic tangent[MPa].
b: strain-hardening ratio (ratio between post-yield tangent and initial elastic tangent).
a1: isotropic hardening parameter, increase of compression yield envelope as proportion of yield strength after a plastic strain of $a2*($Fy/E0). (optional).
a2: isotropic hardening parameter (see explanation under $a1). (optional).
a3: isotropic hardening parameter, increase of tension yield envelope as proportion of yield strength after a plastic strain of $a4*($Fy/E0). (optional)
a4: isotropic hardening parameter (see explanation under $a3). (optional)"""
self.matName = matName
self.fy = fy
self.E0 = E0
self.b = b
self.a1 = a1
self.a2 = a2
self.a3 = a3
self.a4 = a4
self.matTag = None
self.materialDimension = "uniAxialMaterial"
self.materialType = "Steel01"
def ToString(self):
return self.write_tcl()
def write_tcl(self):
if self.a1 != None and self.a2 == None and self.a3 == None and self.a4 == None:
return "uniaxialMaterial Steel01 {} {} {} {} {}\n".format(self.matTag, self.fy, self.E0, self.b, self.a1)
elif self.a1 != None and self.a2 != None and self.a3 == None and self.a4 == None:
return "uniaxialMaterial Steel01 {} {} {} {} {} {}\n".format(self.matTag, self.fy, self.E0, self.b, self.a1, self.a2)
elif self.a1 != None and self.a2 != None and self.a3 != None and self.a4 == None:
return "uniaxialMaterial Steel01 {} {} {} {} {} {} {}\n".format(self.matTag, self.fy, self.E0, self.b, self.a1, self.a2, self.a3)
elif self.a1 != None and self.a2 != None and self.a3 != None and self.a4 != None:
return "uniaxialMaterial Steel01 {} {} {} {} {} {} {} {}\n".format(self.matTag, self.fy, self.E0, self.b, self.a1, self.a2, self.a3, self.a4)
else:
return "uniaxialMaterial Steel01 {} {} {} {}\n".format(self.matTag, self.fy, self.E0, self.b)
def write_py(self):
if self.a1 != None and self.a2 == None and self.a3 == None and self.a4 == None:
return "ops.uniaxialMaterial('Steel01', {}, {}, {}, {}, {})\n".format(self.matTag, self.fy, self.E0, self.b, self.a1)
elif self.a1 != None and self.a2 != None and self.a3 == None and self.a4 == None:
return "ops.uniaxialMaterial('Steel01', {}, | |
# Source repository: rcbops/glance-buildpackage (glance/store/swift.py)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for SWIFT"""
from __future__ import absolute_import
import hashlib
import httplib
import logging
import math
import tempfile
import urlparse
from glance.common import cfg
from glance.common import exception
import glance.store
import glance.store.base
import glance.store.location
try:
from swift.common import client as swift_client
except ImportError:
pass
DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M
ONE_MB = 1000 * 1024
logger = logging.getLogger('glance.store.swift')
class StoreLocation(glance.store.location.StoreLocation):
"""
Class describing a Swift URI. A Swift URI can look like any of
the following:
swift://user:[email protected]/container/obj-id
swift://account:user:[email protected]/container/obj-id
swift+http://user:[email protected]/container/obj-id
swift+https://user:[email protected]/container/obj-id
The swift+http:// URIs indicate there is an HTTP authentication URL.
The default for Swift is an HTTPS authentication URL, so swift:// and
swift+https:// are the same...
"""
def process_specs(self):
self.scheme = self.specs.get('scheme', 'swift+https')
self.user = self.specs.get('user')
self.key = self.specs.get('key')
self.authurl = self.specs.get('authurl')
self.container = self.specs.get('container')
self.obj = self.specs.get('obj')
def _get_credstring(self):
if self.user:
return '%s:%s@' % (self.user, self.key)
return ''
def get_uri(self):
authurl = self.authurl
if authurl.startswith('http://'):
authurl = authurl[7:]
elif authurl.startswith('https://'):
authurl = authurl[8:]
credstring = self._get_credstring()
authurl = authurl.strip('/')
container = self.container.strip('/')
obj = self.obj.strip('/')
return '%s://%s%s/%s/%s' % (self.scheme, credstring, authurl,
container, obj)
def parse_uri(self, uri):
"""
Parse URLs. This method fixes an issue where credentials specified
in the URL are interpreted differently in Python 2.6.1+ than prior
versions of Python. It also deals with the peculiarity that new-style
Swift URIs have where a username can contain a ':', like so:
swift://account:user:[email protected]/container/obj
"""
# Make sure that URIs that contain multiple schemes, such as:
# swift://user:pass@http://authurl.com/v1/container/obj
# are immediately rejected.
if uri.count('://') != 1:
reason = _(
"URI cannot contain more than one occurrence of a scheme."
"If you have specified a URI like "
"swift://user:pass@http://authurl.com/v1/container/obj"
", you need to change it to use the swift+http:// scheme, "
"like so: "
"swift+http://user:[email protected]/v1/container/obj"
)
raise exception.BadStoreUri(uri, reason)
pieces = urlparse.urlparse(uri)
assert pieces.scheme in ('swift', 'swift+http', 'swift+https')
self.scheme = pieces.scheme
netloc = pieces.netloc
path = pieces.path.lstrip('/')
if netloc != '':
# > Python 2.6.1
if '@' in netloc:
creds, netloc = netloc.split('@')
else:
creds = None
else:
# Python 2.6.1 compat
# see lp659445 and Python issue7904
if '@' in path:
creds, path = path.split('@')
else:
creds = None
netloc = path[0:path.find('/')].strip('/')
path = path[path.find('/'):].strip('/')
if creds:
cred_parts = creds.split(':')
# User can be account:user, in which case cred_parts[0:2] will be
# the account and user. Combine them into a single username of
# account:user
if len(cred_parts) == 1:
reason = (_("Badly formed credentials '%(creds)s' in Swift "
"URI") % locals())
raise exception.BadStoreUri(uri, reason)
elif len(cred_parts) == 3:
user = ':'.join(cred_parts[0:2])
else:
user = cred_parts[0]
key = cred_parts[-1]
self.user = user
self.key = key
else:
self.user = None
path_parts = path.split('/')
try:
self.obj = path_parts.pop()
self.container = path_parts.pop()
if not netloc.startswith('http'):
# push the hostname back onto the remaining path parts to build the full authurl
path_parts.insert(0, netloc)
self.authurl = '/'.join(path_parts)
except IndexError:
reason = _("Badly formed Swift URI")
raise exception.BadStoreUri(uri, reason)
@property
def swift_auth_url(self):
"""
Creates a fully-qualified auth url that the Swift client library can
use. The scheme for the auth_url is determined using the scheme
included in the `location` field.
HTTPS is assumed, unless 'swift+http' is specified.
"""
if self.scheme in ('swift+https', 'swift'):
auth_scheme = 'https://'
else:
auth_scheme = 'http://'
full_url = ''.join([auth_scheme, self.authurl])
return full_url
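# Editor's illustration (not part of the original module): how the URI forms
# documented above map onto StoreLocation fields. The host, credentials and
# object id below are made-up placeholders, and the empty-dict construction
# assumes the base class accepts a specs dict, as the usage in Store.add() does.
#
#   loc = StoreLocation({})
#   loc.parse_uri('swift+http://acct:jdoe:[email protected]/v1/glance/1234-5678')
#   # loc.scheme         -> 'swift+http'
#   # loc.user           -> 'acct:jdoe'   (account and user re-joined with ':')
#   # loc.key            -> 'secret'
#   # loc.container      -> 'glance'
#   # loc.obj            -> '1234-5678'
#   # loc.authurl        -> 'auth.example.com/v1'
#   # loc.swift_auth_url -> 'http://auth.example.com/v1'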
class Store(glance.store.base.Store):
"""An implementation of the swift backend adapter."""
EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"
CHUNKSIZE = 65536
opts = [
cfg.BoolOpt('swift_enable_snet', default=False),
cfg.StrOpt('swift_store_auth_address'),
cfg.StrOpt('swift_store_user'),
cfg.StrOpt('swift_store_key'),
cfg.StrOpt('swift_store_container',
default=DEFAULT_CONTAINER),
cfg.IntOpt('swift_store_large_object_size',
default=DEFAULT_LARGE_OBJECT_SIZE),
cfg.IntOpt('swift_store_large_object_chunk_size',
default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
cfg.BoolOpt('swift_store_create_container_on_put', default=False),
]
def configure(self):
self.conf.register_opts(self.opts)
self.snet = self.conf.swift_enable_snet
def configure_add(self):
"""
Configure the Store to use the stored configuration options
Any store that needs special configuration should implement
this method. If the store was not able to successfully configure
itself, it should raise `exception.BadStoreConfiguration`
"""
self.auth_address = self._option_get('swift_store_auth_address')
self.user = self._option_get('swift_store_user')
self.key = self._option_get('swift_store_key')
self.container = self.conf.swift_store_container
try:
# The config file has swift_store_large_object_*size in MB, but
# internally we store it in bytes, since the image_size parameter
# passed to add() is also in bytes.
self.large_object_size = \
self.conf.swift_store_large_object_size * ONE_MB
self.large_object_chunk_size = \
self.conf.swift_store_large_object_chunk_size * ONE_MB
except cfg.ConfigFileValueError, e:
reason = _("Error in configuration conf: %s") % e
logger.error(reason)
raise exception.BadStoreConfiguration(store_name="swift",
reason=reason)
self.scheme = 'swift+https'
if self.auth_address.startswith('http://'):
self.scheme = 'swift+http'
self.full_auth_address = self.auth_address
elif self.auth_address.startswith('https://'):
self.full_auth_address = self.auth_address
else: # Defaults https
self.full_auth_address = 'https://' + self.auth_address
def get(self, location):
"""
Takes a `glance.store.location.Location` object that indicates
where to find the image file, and returns a tuple of generator
(for reading the image file) and image_size
:param location `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
:raises `glance.exception.NotFound` if image does not exist
"""
loc = location.store_location
swift_conn = self._make_swift_connection(
auth_url=loc.swift_auth_url, user=loc.user, key=loc.key)
try:
(resp_headers, resp_body) = swift_conn.get_object(
container=loc.container, obj=loc.obj,
resp_chunk_size=self.CHUNKSIZE)
except swift_client.ClientException, e:
if e.http_status == httplib.NOT_FOUND:
uri = location.get_store_uri()
raise exception.NotFound(_("Swift could not find image at "
"uri %(uri)s") % locals())
else:
raise
#if expected_size:
# obj_size = int(resp_headers['content-length'])
# if obj_size != expected_size:
# raise glance.store.BackendException(
# "Expected %s byte file, Swift has %s bytes" %
# (expected_size, obj_size))
return (resp_body, resp_headers.get('content-length'))
def get_size(self, location):
"""
Takes a `glance.store.location.Location` object that indicates
where to find the image file, and returns the image_size (or 0
if unavailable)
:param location `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
"""
loc = location.store_location
swift_conn = self._make_swift_connection(
auth_url=loc.swift_auth_url, user=loc.user, key=loc.key)
try:
resp_headers = swift_conn.head_object(container=loc.container,
obj=loc.obj)
return resp_headers.get('content-length', 0)
except Exception:
return 0
def _make_swift_connection(self, auth_url, user, key):
"""
Creates a connection using the Swift client library.
"""
snet = self.snet
logger.debug(_("Creating Swift connection with "
"(auth_address=%(auth_url)s, user=%(user)s, "
"snet=%(snet)s)") % locals())
return swift_client.Connection(
authurl=auth_url, user=user, key=key, snet=snet)
def _option_get(self, param):
result = getattr(self.conf, param)
if not result:
reason = (_("Could not find %(param)s in configuration "
"options.") % locals())
logger.error(reason)
raise exception.BadStoreConfiguration(store_name="swift",
reason=reason)
return result
def add(self, image_id, image_file, image_size):
"""
Stores an image file with supplied identifier to the backend
storage system and returns an `glance.store.ImageAddResult` object
containing information about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:retval `glance.store.ImageAddResult` object
:raises `glance.common.exception.Duplicate` if the image already
existed
Swift writes the image data using the scheme:
``swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<ID>`
where:
<USER> = ``swift_store_user``
<KEY> = ``swift_store_key``
<AUTH_ADDRESS> = ``swift_store_auth_address``
<CONTAINER> = ``swift_store_container``
<ID> = The id of the image being added
:note Swift auth URLs by default use HTTPS. To specify an HTTP
auth URL, you can specify http://someurl.com for the
swift_store_auth_address config option
:note Swift cannot natively/transparently handle objects >5GB
in size. So, if the image is greater than 5GB, we write
chunks of image data to Swift and then write a manifest
to Swift that contains information about the chunks.
This same chunking process is used by default for images
of an unknown size, as pushing them directly to swift would
fail if the image turns out to be greater than 5GB.
"""
swift_conn = self._make_swift_connection(
auth_url=self.full_auth_address, user=self.user, key=self.key)
create_container_if_missing(self.container, swift_conn, self.conf)
obj_name = str(image_id)
location = StoreLocation({'scheme': self.scheme,
'container': self.container,
'obj': obj_name,
'authurl': self.auth_address,
'user': self.user,
'key': self.key})
logger.debug(_("Adding image object '%(obj_name)s' "
"to Swift") % locals())
try:
if image_size > 0 and image_size < self.large_object_size:
# Image size is known, and is less than large_object_size.
# Send to Swift with regular PUT.
obj_etag = swift_conn.put_object(self.container, obj_name,
image_file,
content_length=image_size)
else:
# Write the image into Swift in chunks.
chunk_id = 1
if image_size > 0:
total_chunks = str(int(
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Automatically initialize and run idealized experiments in WRF
on a single computer or cluster.
@author: <NAME>
"""
import numpy as np
import math
import os
import datetime
import argparse
import glob
import importlib
import inspect
from copy import deepcopy
from pathlib import Path as fopen
import sys
from run_wrf import get_namelist
# %%
def launch_jobs(config_file="config", init=False, outpath=None, exist="s",
debug=False, build=None, use_job_scheduler=False, check_args=False,
pool_jobs=False, mail="ea", wait=False, no_namelist_check=False,
test_run=False, verbose=False, param_combs=None):
"""
Initialize and run idealized WRF experiments.
Refer to README.md for more information.
Parameters
----------
config_file : str, optional
Name of config file in configs folder. The default is "config".
init : bool, optional
Initialize simulations.
outpath : str, optional
Directory for WRF output. Default defined in script. Only effective during initialization.
exist : str, optional
What to do if output already exists: Skip run ('s'), overwrite ('o'),
restart ('r') or backup files ('b'). Default is 's'.
debug : bool, optional
Run wrf in debugging mode. Uses WRF build specified by debug_build in config file.
build : str, optional
WRF build (subdirectory of conf.build_path) to use. Default is None,
which means that one of the build directories specified in the config file is used.
Supersedes argument 'debug'.
use_job_scheduler : bool, optional
Use job scheduler to submit jobs
check_args : bool, optional
Only test the python script (no jobs submitted)
pool_jobs : bool, optional
Gather jobs before submitting with a job scheduler.
Needed if different jobs shall be run on the same node
(potentially filling up the whole node)
mail : str, optional
If using a job scheduler, defines when mail is sent.
Either 'n' for no mails, or a combination of 'b' (beginning of job),
'e' (end), 'a' (abort), 's' (suspended). Default: 'ea'
wait : bool, optional
Wait until job is finished before submitting the next.
no_namelist_check : bool, optional
Do not perform sanity check of namelist parameters.
test_run : bool, optional
Do short test runs on cluster to find out required runtime and virtual memory
verbose : bool, optional
Verbose mode
param_combs : list of dicts or pandas DataFrame, optional
DataFrame with settings for all configurations.
Returns
-------
param_combs : pandas DataFrame
DataFrame with settings for all configurations.
"""
from run_wrf import tools
if (not init) and (outpath is not None):
print("WARNING: option -o ignored when not in initialization mode!\n")
if wait and use_job_scheduler:
raise ValueError("Waiting for batch jobs is not yet implemented")
if init and (exist == "r"):
raise ValueError("For restart runs no initialization is needed!")
outpath_input = outpath
if config_file[-3:] == ".py":
config_file = config_file[:-3]
try:
conf = importlib.import_module("run_wrf.configs.{}".format(config_file))
except ModuleNotFoundError:
sys.path.append(os.getcwd())
conf = importlib.import_module(config_file)
# change to code path
os.chdir(os.path.abspath(os.path.dirname(__file__)))
if param_combs is None:
if ("param_combs" in dir(conf)) and (conf.param_combs is not None):
param_combs = conf.param_combs
else:
args = []
for k in ["param_grid", "params", "param_names"]:
if k in dir(conf):
args.append(eval("conf.{}".format(k)))
else:
args.append(None)
param_combs = tools.grid_combinations(*args, runID=conf.runID)
param_combs = deepcopy(param_combs)
if test_run:
print("Do short test runs on cluster to find out required runtime and virtual memory\n\n")
if init:
job_name = "init_"
else:
job_name = "run_"
job_name += conf.runID
# temporary log output for job scheduler
if use_job_scheduler:
job_scheduler = conf.job_scheduler.lower()
if job_scheduler not in ["slurm", "sge"]:
raise ValueError("Job scheduler {} not implemented. "
"Use SGE or SLURM".format(job_scheduler))
if job_scheduler == "slurm":
# assuming no oversubscription is allowed, pooling is necessary
if conf.force_pool:
pool_jobs = True
mail_slurm = []
for s, r in zip(["n", "b", "e", "a"], ["NONE", "BEGIN", "END", "FAIL"]):
if s in mail:
mail_slurm.append(r)
mail = ",".join(mail_slurm)
if (conf.mail_address is None) or (conf.mail_address == ""):
raise ValueError("For jobs using {}, provide valid mail address "
"in config file".format(job_scheduler))
if job_scheduler == "slurm":
job_id = "_%j"
else:
job_id = "_$JOB_ID"
else:
job_scheduler = None
conf.request_vmem = False
if pool_jobs and (conf.pool_size is None):
raise ValueError("pool_size cannot be None when using pool_jobs!")
# if test_run and (job_scheduler == "sge"):
# #do test run on one node by using openmpi-xperhost to ensure correct vmem logging
# conf.reduce_pool = True TODO
IDs = []
rtr = []
vmem = []
nslots = []
nxny = []
if init:
print("Initialize WRF simulations")
else:
print("Run WRF simulations")
if ("mpiexec" not in dir(conf)) or (conf.mpiexec is None):
mpiexec = "mpiexec"
print("mpiexec=None or not set in config file. Using system default.")
else:
mpiexec = conf.mpiexec
print("Use mpiexec at {}".format(mpiexec))
print("Configs:")
if "core_param" in param_combs.index:
core_params = param_combs.loc["core_param"]
# delete core_param line and composite_idx lines and columns
composite_idx = param_combs.iloc[-1]
param_combs = param_combs.loc[:, ~composite_idx.astype(bool)]
param_combs = param_combs.iloc[:-2]
# print only core parameters
print(param_combs.loc[:, core_params])
else:
print(param_combs.index.values)
print("-" * 40)
for i, (cname, param_comb) in enumerate(param_combs.iterrows()):
IDi = param_comb["fname"]
args = deepcopy(param_comb.dropna().to_dict())
del args["fname"]
# create output ID for current configuration
run_dir = "{}/WRF_{}".format(args["run_path"], IDi)
print("\n\nConfig: " + IDi)
print(cname)
print("\n")
if ("dy" not in args) and ("dx" in args):
args["dy"] = args["dx"]
# start and end times
date_format = '%Y-%m-%d_%H:%M:%S'
start_time_dt = datetime.datetime.strptime(args["start_time"], date_format)
end_time_dt = datetime.datetime.strptime(args["end_time"], date_format)
start_d, start_t = args["start_time"].split("_")
start_d = start_d.split("-")
start_t = start_t.split(":")
end_d, end_t = args["end_time"].split("_")
end_d = end_d.split("-")
end_t = end_t.split(":")
run_hours = (end_time_dt - start_time_dt).total_seconds() / 3600
if run_hours <= 0:
raise ValueError("Selected end time {} smaller or equal start time {}!".format(
args["end_time"], args["start_time"]))
for di, n in zip(start_d + start_t, ["year", "month", "day", "hour", "minute", "second"]):
args["start_" + n] = di
for di, n in zip(end_d + end_t, ["year", "month", "day", "hour", "minute", "second"]):
args["end_" + n] = di
# use end time not run_*
args["run_hours"] = 0
args["run_minutes"] = 0
args["run_seconds"] = 0
if ("lx" in conf.params) and ("dx" in conf.params):
args["e_we"] = math.ceil(args["lx"] / args["dx"]) + 1
if ("ly" in conf.params) and ("dy" in conf.params):
args["e_sn"] = math.ceil(args["ly"] / args["dy"]) + 1
# slots
nx = tools.find_nproc(args["e_we"] - 1,
min_n_per_proc=args["min_nx_per_proc"])
ny = tools.find_nproc(args["e_sn"] - 1,
min_n_per_proc=args["min_ny_per_proc"])
if ("max_nslotsx" in args) and (args["max_nslotsx"] is not None):
nx = min(args["max_nslotsx"], nx)
if ("max_nslotsy" in args) and (args["max_nslotsy"] is not None):
ny = min(args["max_nslotsy"], ny)
if (nx == 1) and (ny == 1):
nx = -1
ny = -1
nslotsi = nx * ny
# determine which build to use
slot_comm = ""
parallel = False
if (np.array([*nslots, nslotsi]) > 1).any():
parallel = True
if use_job_scheduler and (not pool_jobs):
if job_scheduler == "sge":
slot_comm = "-pe openmpi-fillup {}".format(nslotsi)
elif job_scheduler == "slurm":
slot_comm = "-N {}".format(nslotsi)
# set WRF build to use
if build is not None:
wrf_dir_i = build
elif debug:
wrf_dir_i = args["debug_build"]
elif parallel:
wrf_dir_i = args["parallel_build"]
else:
wrf_dir_i = args["serial_build"]
wrf_build = "{}/{}".format(args["build_path"], wrf_dir_i)
print("Setting namelist parameters\n")
namelist_path = "{}/test/{}/namelist.input".format(wrf_build, args["ideal_case_name"])
namelist_all = get_namelist.namelist_to_dict(namelist_path, build_path=wrf_build,
registries=conf.registries)
namelist = get_namelist.namelist_to_dict(namelist_path)
if use_job_scheduler:
batch_log_dir = args["run_path"] + "/logs/"
os.makedirs(batch_log_dir, exist_ok=True)
queue = conf.queue
vmemi = None
if init:
print("Using WRF build in: {}\n".format(wrf_build))
args, args_str = tools.prepare_init(args, conf, namelist, namelist_all,
namelist_check=not no_namelist_check)
# job scheduler queue and vmem
if use_job_scheduler and conf.request_vmem:
vmem_init = args["vmem_init"]
if ("bigmem_limit" in dir(conf)) and (vmem_init > conf.bigmem_limit):
queue = conf.bigmem_queue
elif use_job_scheduler or test_run:
if "dt_f" not in args:
args["dt_f"] = namelist_all["time_step"] + \
namelist_all["time_step_fract_num"] / namelist_all["time_step_fract_den"]
args, skip = tools.set_vmem_rt(args, run_dir, conf, run_hours, nslots=nslotsi,
pool_jobs=pool_jobs, test_run=test_run,
request_vmem=conf.request_vmem)
if skip:
continue
if conf.request_vmem:
vmemi = args["vmem"]
# not needed; just for completeness of dataframe:
args["nx"] = nx
args["ny"] = ny
args["nslots"] = nslotsi
args["run_dir"] = run_dir
for arg, val in args.items():
if arg not in param_combs.keys():
param_combs[arg] = None
param_combs[arg][cname] = val
if outpath_input is None:
base_outpath = args["outpath"]
else:
base_outpath = outpath_input
n_rep = args.setdefault("n_rep", 1)
for rep in range(n_rep): # repetion loop
vmem.append(vmemi)
nslots.append(nslotsi)
nxny.append([nx, ny])
IDr = IDi + "_" + str(rep)
run_dir_r = run_dir + "_" + str(rep)
# create output path
outpath = os.path.join(base_outpath, IDr, "") # WRF output path
if not os.path.isdir(outpath):
os.makedirs(outpath)
outpath_esc = outpath.replace("/", "\/") # need to escape slashes
if init:
if os.path.isdir(run_dir_r):
print("Run directory already exists.")
if exist == "s":
if os.path.isfile(run_dir_r + "/wrfinput_d01"):
print("Initialization was complete.\nSkipping...")
continue
else:
print("However, WRF initialization | |
"external_name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceSpecParametersArgs']]]]:
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceSpecParametersArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="serviceClassType")
def service_class_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_class_type")
@service_class_type.setter
def service_class_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_class_type", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class ServiceSpecContextArgs:
def __init__(__self__, *,
org: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
resourcegroup: Optional[pulumi.Input[str]] = None,
resourcegroupid: Optional[pulumi.Input[str]] = None,
resourcelocation: Optional[pulumi.Input[str]] = None,
space: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
ResourceContext defines the CloudFoundry context and resource group
"""
if org is not None:
pulumi.set(__self__, "org", org)
if region is not None:
pulumi.set(__self__, "region", region)
if resourcegroup is not None:
pulumi.set(__self__, "resourcegroup", resourcegroup)
if resourcegroupid is not None:
pulumi.set(__self__, "resourcegroupid", resourcegroupid)
if resourcelocation is not None:
pulumi.set(__self__, "resourcelocation", resourcelocation)
if space is not None:
pulumi.set(__self__, "space", space)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def org(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "org")
@org.setter
def org(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "org", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def resourcegroup(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resourcegroup")
@resourcegroup.setter
def resourcegroup(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resourcegroup", value)
@property
@pulumi.getter
def resourcegroupid(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resourcegroupid")
@resourcegroupid.setter
def resourcegroupid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resourcegroupid", value)
@property
@pulumi.getter
def resourcelocation(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resourcelocation")
@resourcelocation.setter
def resourcelocation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resourcelocation", value)
@property
@pulumi.getter
def space(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "space")
@space.setter
def space(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "space", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class ServiceSpecParametersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, Any]]]]] = None,
value: Optional[Any] = None,
value_from: Optional[pulumi.Input['ServiceSpecParametersValueFromArgs']] = None):
"""
Param represents a key-value pair
:param pulumi.Input[str] name: Name representing the key.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, Any]]]] attributes: A parameter may have attributes (e.g. message hub topic might have partitions)
:param Any value: Defaults to null.
:param pulumi.Input['ServiceSpecParametersValueFromArgs'] value_from: Source for the value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if attributes is not None:
pulumi.set(__self__, "attributes", attributes)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name representing the key.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, Any]]]]]:
"""
A parameter may have attributes (e.g. message hub topic might have partitions)
"""
return pulumi.get(self, "attributes")
@attributes.setter
def attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, Any]]]]]):
pulumi.set(self, "attributes", value)
@property
@pulumi.getter
def value(self) -> Optional[Any]:
"""
Defaults to null.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[Any]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['ServiceSpecParametersValueFromArgs']]:
"""
Source for the value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['ServiceSpecParametersValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class ServiceSpecParametersValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['ServiceSpecParametersValueFromConfigMapKeyRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['ServiceSpecParametersValueFromSecretKeyRefArgs']] = None):
"""
Source for the value. Cannot be used if value is not empty.
:param pulumi.Input['ServiceSpecParametersValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['ServiceSpecParametersValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the resource namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['ServiceSpecParametersValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['ServiceSpecParametersValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['ServiceSpecParametersValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the resource namespace
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['ServiceSpecParametersValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class ServiceSpecParametersValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ServiceSpecParametersValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the resource namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class ServiceStatusArgs:
def __init__(__self__, *,
plan: pulumi.Input[str],
service_class: pulumi.Input[str],
service_class_type: pulumi.Input[str],
context: Optional[pulumi.Input['ServiceStatusContextArgs']] = None,
dashboard_url: Optional[pulumi.Input[str]] = None,
external_name: Optional[pulumi.Input[str]] = None,
generation: Optional[pulumi.Input[int]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceStatusParametersArgs']]]] = None,
state: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
ServiceStatus defines the observed state of Service
:param pulumi.Input['ServiceStatusContextArgs'] context: ResourceContext defines the CloudFoundry context and resource group
"""
pulumi.set(__self__, "plan", plan)
pulumi.set(__self__, "service_class", service_class)
pulumi.set(__self__, "service_class_type", service_class_type)
if context is not None:
pulumi.set(__self__, "context", context)
if dashboard_url is not None:
pulumi.set(__self__, "dashboard_url", dashboard_url)
if external_name is not None:
pulumi.set(__self__, "external_name", external_name)
if generation is not None:
pulumi.set(__self__, "generation", generation)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if message is not None:
pulumi.set(__self__, "message", message)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if state is not None:
pulumi.set(__self__, "state", state)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def plan(self) -> pulumi.Input[str]:
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: pulumi.Input[str]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter(name="serviceClass")
def service_class(self) -> pulumi.Input[str]:
return pulumi.get(self, "service_class")
@service_class.setter
def service_class(self, value: pulumi.Input[str]):
pulumi.set(self, "service_class", value)
@property
@pulumi.getter(name="serviceClassType")
def service_class_type(self) -> pulumi.Input[str]:
return pulumi.get(self, "service_class_type")
@service_class_type.setter
def service_class_type(self, value: pulumi.Input[str]):
pulumi.set(self, "service_class_type", value)
@property
@pulumi.getter
def context(self) -> Optional[pulumi.Input['ServiceStatusContextArgs']]:
"""
ResourceContext defines the CloudFoundry context and resource group
"""
return pulumi.get(self, "context")
@context.setter
def context(self, value: Optional[pulumi.Input['ServiceStatusContextArgs']]):
pulumi.set(self, "context", value)
@property
@pulumi.getter(name="dashboardURL")
def dashboard_url(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "dashboard_url")
@dashboard_url.setter
def dashboard_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dashboard_url", value)
@property
@pulumi.getter(name="externalName")
def external_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "external_name")
@external_name.setter
def external_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_name", value)
@property
@pulumi.getter
def generation(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "generation")
@generation.setter
def generation(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "generation", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceStatusParametersArgs']]]]:
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceStatusParametersArgs']]]]):
pulumi.set(self, "parameters", value)
@property
= fake_calls[:]
# set testing
self.for_testing = isinstance(test_size, int)
# setup driver type
self.driver_type = driver_type
# and pinned
self.use_pinned = use_pinned
# mark owners
self.owner = None
# validation
self.for_validation = False
def __mark(dep):
for x in dep.depends_on:
x.owner = dep
__mark(x)
__mark(self)
# the base skeleton for sub kernel creation
self.skeleton = textwrap.dedent(
"""
for j
${pre}
for ${var_name}
${main}
end
${post}
end
""")
if self.loopy_opts.pre_split:
# pre split skeleton
self.skeleton = textwrap.dedent(
"""
for j_outer
for j_inner
${pre}
for ${var_name}
${main}
end
${post}
end
end
""")
@property
def name(self):
"""
Return the name of this kernel generator, based on :attr:`kernel_type`
"""
if self.kernel_type == KernelType.dummy:
return self._name
return utils.enum_to_string(self.kernel_type)
@property
def unique_pointers(self):
"""
Return True IFF the user specified the :attr:`loopy_opts.unique_pointers`
"""
return self.loopy_opts.unique_pointers
@property
def work_size(self):
"""
Returns either the integer :attr:`loopy_opts.work_size` (if specified by
user) or the name of the `work_size` variable
"""
if self.unique_pointers:
return self.vec_width if self.vec_width else 1
return w_size.name
@property
def target_preambles(self):
"""
Preambles based on the target language
Returns
-------
preambles: list of str
The string preambles for this :class:`kernel_generator`
"""
return []
@property
def vec_width(self):
"""
Returns the vector width of this :class:`kernel_generator`
"""
if self.loopy_opts.depth:
return self.loopy_opts.depth
if self.loopy_opts.width:
return self.loopy_opts.width
return 0
@property
def hoist_locals(self):
"""
If true (e.g., in a subclass), this type of generator requires that local
memory be hoisted up to / defined in the type-level kernel.
This is typically the case for languages such as OpenCL and CUDA, but not
C / OpenMP
"""
return False
@property
def file_prefix(self):
"""
Prefix for filenames based on autodifferentiation status
"""
file_prefix = ''
if self.auto_diff:
file_prefix = 'ad_'
return file_prefix
def apply_barriers(self, instructions, barriers=None):
"""
A method stub that can be overridden to apply synchronization barriers
to vectorized code
Parameters
----------
instructions: list of str
The instructions for this kernel
barriers: list of (int, int)
The integer indices between which to insert instructions
If not supplied, :attr:`barriers` will be used
Returns
-------
instructions : list of str
The instructions passed in
"""
return instructions
def get_assumptions(self, test_size, for_driver=False):
"""
Returns a list of assumptions on the loop domains
of generated subkernels
Parameters
----------
test_size : int or str
In testing, this should be the integer size of the test data
For production, this should be the 'test_size' (or the corresponding)
for the variable test size passed to the kernel
for_driver: bool [False]
If this kernel is a driver function
Returns
-------
assumptions : list of str
List of assumptions to apply to the generated sub kernel
"""
return []
def get_inames(self, test_size, for_driver=False):
"""
Returns the inames and iname_ranges for subkernels created using
this generator
Parameters
----------
test_size : int or str
In testing, this should be the integer size of the test data
For production, this should be the 'test_size' (or the corresponding)
for the variable test size passed to the kernel
for_driver : bool [False]
If True, utilize the entire test size
Returns
-------
inames : list of str
The string inames to add to created subkernels by default
iname_domains : list of str
The iname domains to add to created subkernels by default
"""
# need to implement a pre-split, to avoid loopy mangling the inner / outer
# parallel inames
pre_split = self.loopy_opts.pre_split
gind = global_ind
if not self.for_testing:
# if we're not testing, or in a driver function the kernel must only be
# executed once, as the loop over the work-size has been lifted to the
# driver kernels
test_size = self.loopy_opts.initial_condition_loopsize
if pre_split:
gind += '_outer'
inames = [gind]
domains = ['0 <= {} < {}'.format(gind, test_size)]
if self.loopy_opts.pre_split:
if self.for_testing or self.unique_pointers:
# reduced test size
test_size = int(test_size / self.vec_width)
# add/fixup dummy j_inner domain
lind = global_ind + '_inner'
inames[-1] = (gind, lind)
domains[-1] = ('0 <= {lind} < {vw} and '
'0 <= {gind} < {end}'.format(
lind=lind, gind=gind, end=test_size,
vw=self.vec_width))
return inames, domains
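# Editor's example (not original code), assuming the module-level global_ind is
# 'j' as the skeleton above suggests: with for_testing=True, test_size=1024 and
# a pre-split vector width of 8, the method returns
#
#   inames  == [('j_outer', 'j_inner')]
#   domains == ['0 <= j_inner < 8 and 0 <= j_outer < 128']
#
# i.e. the outer domain is shrunk by the vector width and j_inner covers one
# vector lane. Without pre_split it is simply ['j'] and ['0 <= j < 1024'].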
def add_depencencies(self, k_gens):
"""
Adds the supplied :class:`kernel_generator`s to this
one's dependency list. Functionally this means that this kernel
generator will know how to compile and execute functions
from the dependencies
Parameters
----------
k_gens : list of :class:`kernel_generator`
The dependencies to add to this kernel
"""
self.depends_on.extend(k_gens)
def _with_target(self, kernel_arg, for_atomic=False):
"""
Returns a copy of :param:`kernel_arg` with its :attr:`dtype.target` set
for proper pickling
Parameters
----------
kernel_arg: :class:`loopy.KernelArgument`
The argument to convert
for_atomic: bool [False]
If true, convert to an :class:`AtomicNumpyType`
Returns
-------
updated: :class:`loopy.KernelArgument`
The argument with correct target set in the dtype
"""
return kernel_arg.copy(
dtype=to_loopy_type(kernel_arg.dtype, for_atomic=for_atomic,
target=self.target).with_target(self.target))
def _make_kernels(self, kernels=[], **kwargs):
"""
Turns the supplied kernel infos into loopy kernels,
and vectorizes them!
Parameters
----------
None
Returns
-------
kernels: list of :class:`loopy.LoopKernel`
"""
use_ours = False
if not kernels:
use_ours = True
kernels = self.kernels
# now create the kernels!
for i, info in enumerate(kernels):
# if external, or already built
if isinstance(info, lp.LoopKernel):
continue
# create kernel from k_gen.knl_info
kernels[i] = self.make_kernel(info, self.target, self.test_size,
for_driver=kwargs.get('for_driver', False))
# apply vectorization
kernels[i] = self.apply_specialization(
self.loopy_opts,
info.var_name,
kernels[i],
vecspec=info.vectorization_specializer,
can_vectorize=info.can_vectorize,
unrolled_vector=info.unrolled_vector)
dont_split = kwargs.get('dont_split', [])
# update the kernel args
kernels[i] = self.array_split.split_loopy_arrays(
kernels[i], dont_split=dont_split)
if info.split_specializer:
kernels[i] = info.split_specializer(kernels[i])
# and add a mangler
# func_manglers.append(create_function_mangler(kernels[i]))
# set the editor
kernels[i] = lp_utils.set_editor(kernels[i])
# need to call make_kernels on dependencies
for x in self.depends_on:
if use_ours:
x._make_kernels()
return kernels
def __copy_deps(self, scan_path, out_path, change_extension=True):
"""
Convenience function to copy the dependencies of this
:class:`kernel_generator` to our own output path
Parameters
----------
scan_path : str
The path the dependencies were written to
out_path : str
The path this generator is writing to
change_extension : bool
If True, any dependencies that do not end with the proper file
extension (see :any:`utils.file_ext`) are copied with their extension
changed to match
"""
deps = [x for x in os.listdir(scan_path) if os.path.isfile(
os.path.join(scan_path, x)) and not x.endswith('.in')]
for dep in deps:
dep_dest = dep
dep_is_header = dep.endswith(utils.header_ext['c'])
ext = (utils.file_ext[self.lang] if not dep_is_header
else utils.header_ext[self.lang])
if change_extension and not dep.endswith(ext):
dep_dest = dep[:dep.rfind('.')] + ext
shutil.copyfile(os.path.join(scan_path, dep),
os.path.join(out_path, dep_dest))
def order_kernel_args(self, args):
"""
Returns the ordered kernel arguments for this :class:`kernel_generator`
"""
sorting_args = self.in_arrays + self.out_arrays
return utils.kernel_argument_ordering(args, self.kernel_type,
for_validation=self.for_validation,
dummy_args=sorting_args)
def generate(self, path, data_order=None, data_filename='data.bin',
for_validation=False, species_names=[], rxn_strings=[]):
"""
Generates wrapping kernel, compiling program (if necessary) and
calling / executing program for this kernel
Parameters
----------
path : str
The output path
data_order : {'C', 'F'}
If specified, the ordering of the binary input data
which may differ from the loopy order
data_filename : Optional[str]
If specified, the path to the data file for reading / execution
via the command line
for_validation: bool [False]
If True, this kernel is being generated to validate pyJac, hence we need
to save output data to a file
species_names: list of str
The list of species in the model
rxn_strings: list of str
Stringified versions of the reactions in the model
Returns
-------
None
"""
self.for_validation = for_validation
utils.create_dir(path)
self._make_kernels()
callgen, record, result = self._generate_wrapping_kernel(path)
callgen = self._generate_driver_kernel(path, record, result, callgen)
callgen = self._generate_compiling_program(path, callgen)
_, callgen = self._generate_calling_program(
path, data_filename, callgen, record, for_validation=for_validation,
species_names=species_names, rxn_strings=rxn_strings)
self._generate_calling_header(path, callgen)
self._generate_common(path, record)
# finally, copy any dependencies to the path
lang_dir = os.path.join(script_dir, self.lang)
self.__copy_deps(lang_dir, path, change_extension=False)
def _generate_common(self, path, record):
"""
Creates the common files (used by all target languages) for this
kernel generator
Parameters
----------
path : str
The output path for the common files
record: :class:`MemoryGenerationResult`
The memory storage generated for this kernel
Returns
-------
None
"""
inputs = [x for x in record.args if x.name in self.in_arrays]
# create readgen
readgen = ReadgenRecord(
lang=self.loopy_opts.lang,
type_map=self.type_map,
order=self.loopy_opts.order,
inputs=inputs)
# serialize
readout = os.path.join(path, 'readgen.pickle')
with open(readout, 'wb') as file:
pickle.dump(readgen, file)
def run(input, output):
# cogify
try:
Cog().callableMain([
'cogapp', '-e', '-d', '-Dreadgen={}'.format(readout),
'-o', output, input])
except Exception:
logger = logging.getLogger(__name__)
logger.error('Error generating initial conditions reader:'
| |
# Controller.py
# author: <NAME>
# Adapted from <NAME>' code here: http://pydoc.net/Python/jaraco.input/1.0.1/jaraco.input.win32.xinput/ under the MIT licence terms
# last update: 8/14/2018
# Description:
# Provides XBOX controller interface to communicate with DeepNNCar
# BUTTON LAYOUT
# Right trigger => acceleration forward
# Left trigger => acceleration reverse
# Left joystick => movement left/right
# X button => Data collection mode
# A button => Autonomous driving mode
# B button => STOP signal
# Y button => live stream mode
# Up (directional pad) => increment ref speed of PID controller
# Down (directional pad) => decrement ref speed of PID controller
import ctypes
import sys
import time
import socket
from threading import Thread
import threading
import logging
import sys
from operator import itemgetter, attrgetter
from itertools import count, starmap
from pyglet import event
import pyformulas as pf
import matplotlib.pyplot as plt
import numpy as np
import time
import math
import Client
import plotTool
# IMMUTABLE
liveStreamPort = 5002
mainPort = 5001
idle_dcTuple = (
15, 15) # (steering,acceleration) # note steering may need to be adjusted, yet the range should stay at 10.
dcTuple = idle_dcTuple
# MUTABLE
server_address = ('10.66.204.190', mainPort)#204
forward_Range = 1.0 # %python
reverse_Range = 0.8 # %
steering_Range = 12 # %
constantDCEnabled =False
steeringBasedDCEnabled = False
steeringBasedSpeedModeConstant = 0.05
# PID Parameters
cruiseControlEnabled = False
setSpeed = 0.25 # m/s
maxSetSpeed = 1.5
delta = 0.05 # increment/decrement speed by delta using D pad (user controlled)
KP = 0.013
KI = 0.0001
KD = 0.0002
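# Editor's sketch (assumption, not DeepNNCar code): the KP/KI/KD gains above
# suggest a conventional PID correction of the acceleration duty cycle when
# cruise control is enabled. The actual controller runs on the car side and is
# not shown in this file; the commented function below only illustrates how such
# gains are typically applied.
#
#   def pid_step(set_speed, measured_speed, state, dt, kp=KP, ki=KI, kd=KD):
#       """Return a duty-cycle correction; `state` keeps the running integral
#       and the previous error between calls."""
#       error = set_speed - measured_speed
#       state['integral'] = state.get('integral', 0.0) + error * dt
#       derivative = (error - state.get('prev_error', 0.0)) / dt if dt else 0.0
#       state['prev_error'] = error
#       return kp * error + ki * state['integral'] + kd * derivative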
class XINPUT_GAMEPAD(ctypes.Structure):
_fields_ = [
('buttons', ctypes.c_ushort), # wButtons
('left_trigger', ctypes.c_ubyte), # bLeftTrigger
('right_trigger', ctypes.c_ubyte), # bLeftTrigger
('l_thumb_x', ctypes.c_short), # sThumbLX
('l_thumb_y', ctypes.c_short), # sThumbLY
('r_thumb_x', ctypes.c_short), # sThumbRx
('r_thumb_y', ctypes.c_short), # sThumbRy
]
class XINPUT_STATE(ctypes.Structure):
_fields_ = [
('packet_number', ctypes.c_ulong), # dwPacketNumber
('gamepad', XINPUT_GAMEPAD), # Gamepad
]
class XINPUT_VIBRATION(ctypes.Structure):
_fields_ = [("wLeftMotorSpeed", ctypes.c_ushort),
("wRightMotorSpeed", ctypes.c_ushort)]
class XINPUT_BATTERY_INFORMATION(ctypes.Structure):
_fields_ = [("BatteryType", ctypes.c_ubyte),
("BatteryLevel", ctypes.c_ubyte)]
xinput = ctypes.windll.xinput1_4
def struct_dict(struct):
get_pair = lambda field_type: (
field_type[0], getattr(struct, field_type[0]))
return dict(list(map(get_pair, struct._fields_)))
def get_bit_values(number, size=32):
res = list(gen_bit_values(number))
res.reverse()
# 0-pad the most significant bit
res = [0] * (size - len(res)) + res
return res
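# Worked example (editor's note): get_bit_values(10, size=8) first collects the
# bits of 10 least-significant-first via gen_bit_values ([0, 1, 0, 1]), reverses
# them and left-pads with zeros, giving [0, 0, 0, 0, 1, 0, 1, 0]. It is used
# below to unpack the 16-bit XInput button bitmask into per-button states.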
def gen_bit_values(number):
number = int(number)
while number:
yield number & 0x1
number >>= 1
ERROR_DEVICE_NOT_CONNECTED = 1167
ERROR_SUCCESS = 0
class XInputJoystick(event.EventDispatcher):
max_devices = 4
dcTuple = idle_dcTuple
def get_dcTuple(self):
return XInputJoystick.dcTuple
def __init__(self, device_number, normalize_axes=True):
values = vars()
del values['self']
self.__dict__.update(values)
super(XInputJoystick, self).__init__()
self._last_state = self.get_state()
self.received_packets = 0
self.missed_packets = 0
# Set the method that will be called to normalize
# the values for analog axis.
choices = [self.translate_identity, self.translate_using_data_size]
self.translate = choices[normalize_axes]
def translate_using_data_size(self, value, data_size):
# normalizes analog data to [0,1] for unsigned data
# and [-0.5,0.5] for signed data
data_bits = 8 * data_size
return float(value) / (2 ** data_bits - 1)
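# Worked example (editor's note): the triggers are 1-byte unsigned values, so
# translate_using_data_size(255, 1) == 255 / (2**8 - 1) == 1.0 (fully pressed),
# while the thumb axes are 2-byte signed values, so
# translate_using_data_size(32767, 2) == 32767 / 65535 ~= 0.5, matching the
# [0, 1] and [-0.5, 0.5] ranges described in the comment above.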
def translate_identity(self, value, data_size=None):
return value
def get_state(self):
"Get the state of the controller represented by this object"
state = XINPUT_STATE()
res = xinput.XInputGetState(self.device_number, ctypes.byref(state))
if res == ERROR_SUCCESS:
return state
if res != ERROR_DEVICE_NOT_CONNECTED:
raise RuntimeError(
"Unknown error %d attempting to get state of device %d" % (res, self.device_number))
# else return None (device is not connected)
def is_connected(self):
return self._last_state is not None
@staticmethod
def enumerate_devices():
"Returns the devices that are connected"
devices = list(
map(XInputJoystick, list(range(XInputJoystick.max_devices))))
return [d for d in devices if d.is_connected()]
def set_vibration(self, left_motor, right_motor):
"Control the speed of both motors seperately"
# Set up function argument types and return type
XInputSetState = xinput.XInputSetState
XInputSetState.argtypes = [ctypes.c_uint, ctypes.POINTER(XINPUT_VIBRATION)]
XInputSetState.restype = ctypes.c_uint
vibration = XINPUT_VIBRATION(
int(left_motor * 65535), int(right_motor * 65535))
XInputSetState(self.device_number, ctypes.byref(vibration))
def get_battery_information(self):
"Get battery type & charge level"
BATTERY_DEVTYPE_GAMEPAD = 0x00
BATTERY_DEVTYPE_HEADSET = 0x01
# Set up function argument types and return type
XInputGetBatteryInformation = xinput.XInputGetBatteryInformation
XInputGetBatteryInformation.argtypes = [ctypes.c_uint, ctypes.c_ubyte,
ctypes.POINTER(XINPUT_BATTERY_INFORMATION)]
XInputGetBatteryInformation.restype = ctypes.c_uint
battery = XINPUT_BATTERY_INFORMATION(0, 0)
XInputGetBatteryInformation(self.device_number, BATTERY_DEVTYPE_GAMEPAD, ctypes.byref(battery))
batt_type = "Unknown" if battery.BatteryType == 0xFF else ["Disconnected", "Wired", "Alkaline", "Nimh"][
battery.BatteryType]
level = ["Empty", "Low", "Medium", "Full"][battery.BatteryLevel]
return batt_type, level
def dispatch_events(self):
"The main event loop for a joystick"
state = self.get_state()
if not state:
raise RuntimeError(
"Joystick %d is not connected" % self.device_number)
if state.packet_number != self._last_state.packet_number:
# state has changed, handle the change
self.update_packet_count(state)
self.handle_changed_state(state)
self._last_state = state
def update_packet_count(self, state):
"Keep track of received and missed packets for performance tuning"
self.received_packets += 1
missed_packets = state.packet_number - \
self._last_state.packet_number - 1
if missed_packets:
self.dispatch_event('on_missed_packet', missed_packets)
self.missed_packets += missed_packets
def handle_changed_state(self, state):
"Dispatch various events as a result of the state changing"
self.dispatch_event('on_state_changed', state)
self.dispatch_axis_events(state)
self.dispatch_button_events(state)
def dispatch_axis_events(self, state):
dcTuple = idle_dcTuple
# axis fields are everything but the buttons
axis_fields = dict(XINPUT_GAMEPAD._fields_)
axis_fields.pop('buttons')
for axis, type in list(axis_fields.items()):
old_val = getattr(self._last_state.gamepad, axis)
new_val = getattr(state.gamepad, axis)
data_size = ctypes.sizeof(type)
old_val = self.translate(old_val, data_size)
new_val = self.translate(new_val, data_size)
if ((old_val != new_val and (new_val > 0.08000000000000000 or new_val < -0.08000000000000000) and abs(
old_val - new_val) > 0.00000000500000000) or
(axis == 'right_trigger' or axis == 'left_trigger') and new_val == 0 and abs(
old_val - new_val) > 0.00000000500000000):
self.dispatch_event('on_axis', axis, new_val)
if (axis == 'right_trigger'):
dcTuple = (dcTuple[0],
dcTuple[1] + new_val * forward_Range)
if (axis == 'left_trigger'):
dcTuple = (dcTuple[0],
dcTuple[1] - new_val * reverse_Range)
if (axis == 'l_thumb_x'):
dcTuple = (dcTuple[0] + new_val * steering_Range,
dcTuple[1])
if (dcTuple[0] > 20):
dcTuple = (20, dcTuple[1])
if (dcTuple[0] < 10):
dcTuple = (10, dcTuple[1])
XInputJoystick.dcTuple = dcTuple
def dispatch_button_events(self, state):
changed = state.gamepad.buttons ^ self._last_state.gamepad.buttons
changed = get_bit_values(changed, 16)
buttons_state = get_bit_values(state.gamepad.buttons, 16)
changed.reverse()
buttons_state.reverse()
button_numbers = count(1)
changed_buttons = list(
filter(itemgetter(0), list(zip(changed, button_numbers, buttons_state))))
tuple(starmap(self.dispatch_button_event, changed_buttons))
def dispatch_button_event(self, changed, number, pressed):
self.dispatch_event('on_button', number, pressed)
# stub methods for event handlers
def on_state_changed(self, state):
pass
def on_axis(self, axis, value):
pass
def on_button(self, button, pressed):
pass
def on_missed_packet(self, number):
pass
list(map(XInputJoystick.register_event_type, [
'on_state_changed',
'on_axis',
'on_button',
'on_missed_packet',
]))
# printMenu()
# Description: Displays buttons to press
def printMenu():
print('Select a mode...')
print('\tX:\tBegin collecting and recording data.')
print('\tA:\tBegin driving autonomously.')
print('\tB:\tSTOP signal that terminates program.')
print('\tY:\tBegin live streaming.')
# liveStreamThread()
# Description: Creates the livestream thread to receive images from the RPi
# Parameter: stop_event => a threading.Event that, when set, signals the thread to stop
# j => the joystick object that allows the current direction to be displayed on the camera.
def liveStreamThread(stop_event, j):
global setSpeed
logging.basicConfig(filename='scheduler.log', level=logging.DEBUG, filemode='w')
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock2.connect((server_address[0], liveStreamPort))
sock2.settimeout(10)
nullEvents = 0
while not stop_event.is_set() and nullEvents < 20:
dcTuple = j.get_dcTuple()
(nullEvents) = Client.liveStreamReceiver(sock2, dcTuple, nullEvents)
sock2.close()
# plotVelocity
# Creates a thread event that displays a dynamic plot of the speed.
def plotVelocity():
print('animating')
plotTool.animateGraph()
# assertSafety()
# Description: asserts safety of environment
# Parameters: dcTuple => (steering,acc) duty cycle
def assertSafety(dcTuple):
global setSpeed, maxSetSpeed, forward_Range, reverse_Range, idle_dcTuple
# assert(idle_dcTuple[1] + forward_Range >= dcTuple[1]),'forward_Range UNSAFE: duty cycle should not exceed 17%%. If error message incorrect, comment out.'
# assert(idle_dcTuple[1] - reverse_Range <= dcTuple[1]),'reverse_Range UNSAFE: duty cycle should not decrease past 13%%. If error message incorrect, comment out.'
assert (
setSpeed < maxSetSpeed), 'set_speed UNSAFE: set_speed should not exceed 1.5 m/s. If error message incorrect, comment out.'
assert (maxSetSpeed < 2), 'Max set speed UNSAFE: Should not exceed 1.5. Comment out if warning unnecessary.'
# ControlDeepNNCar()
# Description: Dispatches XBOX events and sends messages to DeepNNCar
# Parameter: server_address => port 5001
def ControlDeepNNCar(server_address):
# initialize joysticks
joysticks = XInputJoystick.enumerate_devices()
device_numbers = list(map(attrgetter('device_number'), joysticks))
print('found %d devices: %s' % (len(joysticks), device_numbers))
if not joysticks:
sys.exit(0)
j = joysticks[0]
print('using %d' % j.device_number)
Client.initializeConnection(server_address, idle_dcTuple, steering_Range)
@j.event
def on_button(button, pressed):
global chosenMode, killThread, pill2kill, setSpeed, delta, forward_Range
# X => collect duty cycle and image data
if (button == 15 and pressed == 1 and not chosenMode):
print('Sending COLLECTING DATA signal...')
signal = (1,)
chosenMode = True
message = Client.send(signal, server_address)
print(message)
# A => turn on autonomous driving mode
elif (button == 13 and pressed == 1 and not chosenMode):
print('Sending AUTONOMOUS DRIVE signal...')
signal = (2,)
chosenMode = True
message = Client.send(signal, server_address)
print(message)
# Y => Turn on live feed
elif (button == 16 and pressed == 1 and not chosenMode):
pill2kill = threading.Event()
thread = Thread(target=liveStreamThread, args=(pill2kill, j,))
thread.start()
signal = (3,)
chosenMode = True
killThread = True
# send the signal and print the message
message = Client.send(signal, server_address)
print(message)
# B => Stop
elif (button | |
vocab_size : int
The vocabulary size.
Examples
--------
>>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()
References
---------------
- ``tensorflow.models.rnn.ptb import reader``
- `Manual download <http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz>`__
Notes
------
- If you want to get the raw data, see the source code.
"""
path = os.path.join(path, 'ptb')
logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path))
# Maybe download and uncompress the tar, or load existing files
filename = 'simple-examples.tgz'
url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
maybe_download_and_extract(filename, path, url, extract=True)
data_path = os.path.join(path, 'simple-examples', 'data')
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = nlp.build_vocab(nlp.read_words(train_path))
train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)
valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)
test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)
vocab_size = len(word_to_id)
# logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']
# logging.info(train_data) # ... 214, 5, 23, 1, 2]
# logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }
# logging.info(vocabulary) # 10000
# exit()
return train_data, valid_data, test_data, vocab_size
def load_matt_mahoney_text8_dataset(path='data'):
"""Load <NAME>'s dataset.
Download a text file from <NAME>'s website
if not present, and make sure it's the right size.
Extract the first file enclosed in a zip file as a list of words.
This dataset can be used for Word Embedding.
Parameters
----------
path : str
The path that the data is downloaded to, defaults is ``data/mm_test8/``.
Returns
--------
list of str
The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> print('Data size', len(words))
"""
path = os.path.join(path, 'mm_test8')
logging.info("Load or Download matt_mahoney_text8 Dataset> {}".format(path))
filename = 'text8.zip'
url = 'http://mattmahoney.net/dc/'
maybe_download_and_extract(filename, path, url, expected_bytes=31344016)
with zipfile.ZipFile(os.path.join(path, filename)) as f:
word_list = f.read(f.namelist()[0]).split()
for idx, _ in enumerate(word_list):
word_list[idx] = word_list[idx].decode()
return word_list
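# Illustrative sketch (not part of the original module): a minimal frequency
# count over the text8 word list, typically the first step before training
# word embeddings on this data.
def _text8_top_words(word_list, n=10):
    """Return the n most common (word, count) pairs."""
    import collections
    return collections.Counter(word_list).most_common(n)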
def load_imdb_dataset(
path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
index_from=3
):
"""Load IMDB dataset.
Parameters
----------
path : str
The path that the data is downloaded to, default is ``data/imdb/``.
nb_words : int
Number of words to get.
skip_top : int
Top most frequent words to ignore (they will appear as oov_char value in the sequence data).
maxlen : int
Maximum sequence length. Any longer sequence will be truncated.
seed : int
Seed for reproducible data shuffling.
start_char : int
The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.
oov_char : int
Words that were cut out because of the num_words or skip_top limit will be replaced with this character.
index_from : int
Index actual words with this index and higher.
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(
... nb_words=20000, test_split=0.2)
>>> print('X_train.shape', X_train.shape)
(20000,) [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..]
>>> print('y_train.shape', y_train.shape)
(20000,) [1 0 0 ..., 1 0 1]
References
-----------
- `Modified from keras. <https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py>`__
"""
path = os.path.join(path, 'imdb')
filename = "imdb.pkl"
url = 'https://s3.amazonaws.com/text-datasets/'
maybe_download_and_extract(filename, path, url)
if filename.endswith(".gz"):
f = gzip.open(os.path.join(path, filename), 'rb')
else:
f = open(os.path.join(path, filename), 'rb')
X, labels = cPickle.load(f)
f.close()
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(labels)
if start_char is not None:
X = [[start_char] + [w + index_from for w in x] for x in X]
elif index_from:
X = [[w + index_from for w in x] for x in X]
if maxlen:
new_X = []
new_labels = []
for x, y in zip(X, labels):
if len(x) < maxlen:
new_X.append(x)
new_labels.append(y)
X = new_X
labels = new_labels
if not X:
raise Exception(
'After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. '
'Increase maxlen.'
)
if not nb_words:
nb_words = max([max(x) for x in X])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
else:
nX = []
for x in X:
nx = []
for w in x:
if (w >= nb_words or w < skip_top):
nx.append(w)
nX.append(nx)
X = nX
X_train = np.array(X[:int(len(X) * (1 - test_split))])
y_train = np.array(labels[:int(len(X) * (1 - test_split))])
X_test = np.array(X[int(len(X) * (1 - test_split)):])
y_test = np.array(labels[int(len(X) * (1 - test_split)):])
return X_train, y_train, X_test, y_test
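# Illustrative sketch (not part of the original module): pad or truncate the
# variable-length id sequences returned above to a fixed length, which most
# models expect. Assumes numpy is available as `np`, as elsewhere in this file.
def _pad_sequences_demo(sequences, maxlen=100, value=0):
    """Return an int32 array of shape [len(sequences), maxlen]."""
    out = np.full((len(sequences), maxlen), value, dtype=np.int32)
    for i, seq in enumerate(sequences):
        trunc = list(seq)[:maxlen]
        out[i, :len(trunc)] = trunc
    return out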
def load_nietzsche_dataset(path='data'):
"""Load Nietzsche dataset.
Parameters
----------
path : str
The path that the data is downloaded to, default is ``data/nietzsche/``.
Returns
--------
str
The content.
Examples
--------
>>> see tutorial_generate_text.py
>>> words = tl.files.load_nietzsche_dataset()
>>> words = basic_clean_str(words)
>>> words = words.split()
"""
logging.info("Load or Download nietzsche dataset > {}".format(path))
path = os.path.join(path, 'nietzsche')
filename = "nietzsche.txt"
url = 'https://s3.amazonaws.com/text-datasets/'
filepath = maybe_download_and_extract(filename, path, url)
with open(filepath, "r") as f:
words = f.read()
return words
def load_wmt_en_fr_dataset(path='data'):
"""Load WMT'15 English-to-French translation dataset.
It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set.
Returns the directories of training data and test data.
Parameters
----------
path : str
The path that the data is downloaded to, default is ``data/wmt_en_fr/``.
References
----------
- Code modified from /tensorflow/models/rnn/translation/data_utils.py
Notes
-----
Usually, it will take a long time to download this dataset.
"""
path = os.path.join(path, 'wmt_en_fr')
# URLs for WMT data.
_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/"
_WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/"
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path."""
logging.info("Unpacking %s to %s" % (gz_path, new_path))
with gzip.open(gz_path, "rb") as gz_file:
with open(new_path, "wb") as new_file:
for line in gz_file:
new_file.write(line)
def get_wmt_enfr_train_set(path):
"""Download the WMT en-fr training corpus to directory unless it's there."""
filename = "training-giga-fren.tar"
maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)
train_path = os.path.join(path, "giga-fren.release2.fixed")
gunzip_file(train_path + ".fr.gz", train_path + ".fr")
gunzip_file(train_path + ".en.gz", train_path + ".en")
return train_path
def get_wmt_enfr_dev_set(path):
"""Download the WMT en-fr training corpus to directory unless it's there."""
filename = "dev-v2.tgz"
dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)
dev_name = "newstest2013"
dev_path = os.path.join(path, "newstest2013")
if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
logging.info("Extracting tgz file %s" % dev_file)
with tarfile.open(dev_file, "r:gz") as dev_tar:
fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix.
en_dev_file.name = dev_name + ".en"
dev_tar.extract(fr_dev_file, path)
dev_tar.extract(en_dev_file, path)
return dev_path
logging.info("Load or Download WMT English-to-French translation > {}".format(path))
train_path = get_wmt_enfr_train_set(path)
dev_path = get_wmt_enfr_dev_set(path)
return train_path, dev_path
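# Illustrative sketch (not part of the original module): iterate over aligned
# English/French sentence pairs from the training prefix returned above.
def _iter_parallel_lines(train_path):
    """Yield (english, french) sentence pairs from the extracted corpus files."""
    with open(train_path + ".en") as f_en, open(train_path + ".fr") as f_fr:
        for en, fr in zip(f_en, f_fr):
            yield en.strip(), fr.strip()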
def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False):
"""Load Flickr25K dataset.
Returns a list of images by a given tag from the Flickr25k dataset,
it will download Flickr25k from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__
at the first time you use it.
Parameters
------------
tag : str or None
What images to return.
- If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.
- If you want to get all images, set to ``None``.
path : str
The path that the data is downloaded to, default is ``data/flickr25k/``.
n_threads : int
The number of threads used to read the images.
printable : boolean
Whether to print information when reading images, default is ``False``.
Examples
-----------
Get images with tag of sky
>>> images = tl.files.load_flickr25k_dataset(tag='sky')
Get all images
>>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True)
"""
path = os.path.join(path, 'flickr25k')
filename = 'mirflickr25k.zip'
url = 'http://press.liacs.nl/mirflickr/mirflickr25k/'
# download dataset
if folder_exists(os.path.join(path, "mirflickr")) is False:
logging.info("[*] Flickr25k is nonexistent in {}".format(path))
maybe_download_and_extract(filename, path, url, extract=True)
del_file(os.path.join(path, filename))
# return images by the given tag.
# 1. image path list
folder_imgs = os.path.join(path, "mirflickr")
path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False)
path_imgs.sort(key=natural_keys)
# 2. tag path list
folder_tags = os.path.join(path, "mirflickr", "meta", "tags")
path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False)
path_tags.sort(key=natural_keys)
# 3. select images
if tag is None:
logging.info("[Flickr25k] reading all images")
else:
logging.info("[Flickr25k] reading images with tag: {}".format(tag))
images_list = []
for idx, _v in enumerate(path_tags):
tags = read_file(os.path.join(folder_tags, path_tags[idx])).split('\n')
# logging.info(idx+1, tags)
if tag is None or tag in tags:
images_list.append(path_imgs[idx])
images = visualize.read_images(images_list,
'''
Segmentation of handtools from Handtool dataset using Mask R-CNN.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>:
Estimating 3D Motion and Forces of Person-Object Interactions from Monocular Video,
CVPR 2019.
https://arxiv.org/abs/1904.02683
<NAME>, 2019-2021
Intelligent Machine Perception Project (IMPACT)
http://impact.ciirc.cvut.cz/
CIIRC, Czech Technical University in Prague
Based on implementation of Mask R-CNN by Matterport (see below).
https://github.com/matterport/Mask_RCNN
Usage example:
python handtools_ijcv_recognize_endpoints.py --class_label=spade
'''
############################################################
# Load libraries
############################################################
import os
import shutil
import cv2
import numpy as np
import pickle
import json
############################################################
# Specify directories
############################################################
# Root directory of the project
ROOT_DIR = os.path.abspath('Mask_RCNN')
# Handtool dataset directories
HANDTOOLS_DIR = os.path.join('handtools')
############################################################
# Functions
############################################################
def alphanum_key(s):
"""
Alphanumeric sorting key
:param s: String to be split into text and integer chunks
:return: List of text and integer chunks usable as a natural sort key
"""
import re
def tryint(s):
try:
return int(s)
except:
return s
return [tryint(c) for c in re.split('([0-9]+)', s)]
def get_list_of_files(path, sort=True):
"""
List files in a given path directory
:param path: Directory path
:return: List of file basenames in the directory
"""
basename_list = [basename for basename in os.listdir(path) if os.path.isfile(os.path.join(path, basename))]
if sort:
basename_list.sort(key=alphanum_key)
return basename_list
def swap_arrays(x, y):
tmp = x.copy()
x = y
y = tmp
return x, y
def load_openpose(openpose_dir, video_name, openpose_basename='handtools_openpose.pkl'):
"""
Load joints from Openpose
:param openpose_dir: directory with pose files (.pkl)
:param video_name: name of video, e.g. 'hammer_1'
:return: Openpose data ([frame_index, 18 joint indices, [x, y, score]]),
indices of joints ({'r_wrist': 4, 'l_wrist': 7})
"""
j2d_path = os.path.join(openpose_dir, openpose_basename)
with open(j2d_path, 'rb') as f:
j2d_data = pickle.load(f, encoding='bytes')
j2d_pos = j2d_data[video_name]
openpose_index_dict = {
'r_wrist': 4,
'l_wrist': 7,
}
return j2d_pos, openpose_index_dict
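# Illustrative usage sketch (assumes j2d_pos is a NumPy array indexed as
# [frame, joint, (x, y, score)], as described in the docstring above).
def _right_wrist_trajectory(j2d_pos, openpose_index_dict):
    """Return the [x, y, score] rows of the right wrist, one row per frame."""
    return j2d_pos[:, openpose_index_dict['r_wrist'], :]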
def endpoints_from_mask_and_bbox(fname_mask, fname_bbox=None):
"""
Compute endpoints as intersections between bounding box and line fitted to segmentation mask
:param fname_mask: File name of image with binary segmentation mask
:param fname_bbox: File name of image with binary bounding box (filled rectangle)
:return: List of endpoints [[y0, x0], [y1, x1]] or None
"""
# Segmentation mask
mask = cv2.imread(fname_mask, 0) # grayscale
if np.max(mask) <= 0: # no detection
return None
mask = np.asarray(np.round(mask / np.max(mask)), np.uint8) # binary 0/1
# Fit line through mask
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
for contour in contours[1:]:
cnt = np.vstack((cnt, contour))
[vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
# print('vx, vy, x, y:', vx, vy, x, y)
# Line ax + by + c = 0
a = vy[0]
b = -vx[0]
c = - (a * x[0] + b * y[0])
# print('a, b, c:', a, b, c)
# Bounding box
if fname_bbox is None:
bbox = mask # in absence of bounding box use mask instead
else:
bbox = cv2.imread(fname_bbox, 0) # grayscale
if np.max(bbox) == 0:
bbox = mask # in absence of bounding box use mask instead
else:
bbox = np.asarray(np.round(bbox / np.max(bbox)), np.uint8) # binary 0/1
y0 = np.max(np.argmax(bbox, axis=0))
y1 = np.shape(bbox)[0] - np.max(np.argmax(bbox[::-1, :], axis=0)) - 1
x0 = np.max(np.argmax(bbox, axis=1))
x1 = np.shape(bbox)[1] - np.max(np.argmax(bbox[:, ::-1], axis=1)) - 1
# print('x0, x1, y0, y1:', x0, x1, y0, y1)
# Intersection of line and bounding box
if a != 0:
xA = - (b * y0 + c) / a
xB = - (b * y1 + c) / a
xA = int(np.round(xA))
xB = int(np.round(xB))
else:
xA = np.inf
xB = np.inf
if b != 0:
yA = - (a * x0 + c) / b
yB = - (a * x1 + c) / b
yA = int(np.round(yA))
yB = int(np.round(yB))
else:
yA = np.inf
yB = np.inf
# print('xA, xB, yA, yB:', xA, xB, yA, yB)
endpoint_list = []
if x0 <= xA <= x1:
# print(xA, y0)
if [xA, y0] not in endpoint_list:
endpoint_list.append([y0, xA])
if x0 <= xB <= x1:
# print(xB, y1)
if [xB, y1] not in endpoint_list:
endpoint_list.append([y1, xB])
if y0 <= yA <= y1:
# print(x0, yA)
if [x0, yA] not in endpoint_list:
endpoint_list.append([yA, x0])
if y0 <= yB <= y1:
# print(x1, yB)
if [x1, yB] not in endpoint_list:
endpoint_list.append([yB, x1])
# print(endpoint_list)
if len(endpoint_list) > 2:
dist_max = 0
endpoint_index_list = []
for i in range(len(endpoint_list)):
for j in range(i):
dist_ij = dist_points(endpoint_list[i], endpoint_list[j])
if dist_ij > dist_max:
dist_max = dist_ij
endpoint_index_list = [j, i]
endpoint_list = [endpoint_list[endpoint_index_list[0]], endpoint_list[endpoint_index_list[1]]]
if len(endpoint_list) != 2:
return None
return endpoint_list
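# Illustrative usage sketch (file names would come from the Handtool dataset):
# wrap the endpoint detection above so frames without a detection yield NaNs.
def _endpoints_or_nan(fname_mask, fname_bbox=None):
    """Return endpoints as a (2, 2) float array, NaN-filled when undetected."""
    endpoints = endpoints_from_mask_and_bbox(fname_mask, fname_bbox)
    if endpoints is None:
        return np.full((2, 2), np.nan)
    return np.asarray(endpoints, float)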
def dist_point_segment(point, point_1, point_2):
"""
Distance of point from segment between 2 endpoints
:param point: Coordinates [y, x] of point
:param point_1: Coordinates [y, x] of one endpoint
:param point_2: Coordinates [y, x] of the other endpoint
:return: Distance of the point from the segment (infinity if invalid)
"""
if np.any(np.isnan([point, point_1, point_2])):
return np.inf
point = np.asarray(point, float)
point_1 = np.asarray(point_1, float)
point_2 = np.asarray(point_2, float)
dist_1 = dist_points(point, point_1)
dist_2 = dist_points(point, point_2)
dist_1_2 = dist_points(point_1, point_2)
if min(dist_1, dist_2) == 0: # point == point_1 or point == point_2
return 0
if dist_1_2 == 0: # point_1 == point_2
return dist_1
dot_1 = np.dot((point_2 - point_1) / np.linalg.norm(point_2 - point_1), (point - point_1) / np.linalg.norm(point - point_1))
dot_2 = np.dot((point_1 - point_2) / np.linalg.norm(point_1 - point_2), (point - point_2) / np.linalg.norm(point - point_2))
if dot_1 < 0 and dot_2 >= 0:
dist = dist_1
elif dot_1 >= 0 and dot_2 < 0:
dist = dist_2
elif dot_1 >= 0 and dot_2 >= 0:
dist = np.abs(np.cross(point_2 - point_1, point - point_1) / np.linalg.norm(point_2 - point_1))
else:
print('Warning: Error in dist_point_segment()')
return np.inf
return dist
def dist_points(point_1, point_2):
"""
Euclidean distance between 2 points
:param point_1: Coordinates [y, x] of one point
:param point_2: Coordinates [y, x] of the other endpoint
:return: Distance between point_1 and point_2 (infinity if invalid)
"""
if np.any(np.isnan([point_1, point_2])):
return np.inf
dist = np.linalg.norm(np.asarray(point_1, float) - np.asarray(point_2, float))
return dist
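# Illustrative sketch (an assumption about downstream use, not from the
# original script): choose which detected endpoint is the tool handle by
# picking the one closer to a wrist joint.
def _closer_endpoint_to_wrist(endpoint_1, endpoint_2, wrist_yx):
    """Return whichever endpoint is nearer to the wrist coordinates [y, x]."""
    d1 = dist_points(endpoint_1, wrist_yx)
    d2 = dist_points(endpoint_2, wrist_yx)
    return endpoint_1 if d1 <= d2 else endpoint_2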
def create_circular_mask(h, w=None, radius=None):
"""
Create a binary circular mask of given shape
:param h: Height
:param w: Width
:param radius: Radius
:return: Binary circular mask of shape (h, w)
"""
if w is None:
w = h
center = (w // 2, h // 2)
if radius is None:
radius = min(center[0], center[1], w-center[0], h-center[1]) + 0.5
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask = dist_from_center < radius
mask = np.asarray(mask, np.uint8)
return mask
def draw_mask(img, mask=None, mask_rgb=(255, 0, 255), mask_border_rgb=(0, 0, 0),
mask_alpha=0.75, mask_border_alpha=1, mask_border_thickness=5):
"""
Visualize binary mask
:param img: RGB image
:param mask: Binary mask
:param mask_rgb: RGB color of mask
:param mask_border_rgb: RGB color of mask boundary
:param mask_alpha: Opacity of mask
:param mask_border_alpha: Opacity of mask boundary
:param mask_border_thickness: Thickness of mask boundary
:return: RGB image with visualized mask
"""
if mask is None or np.max(mask) <= 0: # no mask
return img
mask = mask / np.max(mask) # binary 0/1
for k in range(3): # mask
img[:, :, k] = (1 - mask) * img[:, :, k] \
+ mask * mask_alpha * mask_rgb[k]\
+ mask * (1 - mask_alpha) * img[:, :, k]
mask_border = cv2.dilate(mask, create_circular_mask(mask_border_thickness)) - mask # outer boundary
for k in range(3): # mask boundary
img[:, :, k] = (1 - mask_border) * img[:, :, k] \
+ mask_border * mask_border_alpha * mask_border_rgb[k]\
+ mask_border * (1 - mask_border_alpha) * img[:, :, k]
return img
def draw_endpoints(img, endpoint_1, endpoint_2,
endpoint_radius=10, endpoint_1_rgb=(255, 255, 0), endpoint_2_rgb=(0, 255, 255)):
"""
Visualize endpoints
:param img: RGB image
:param endpoint_1: Coordinates [y, x] of endpoint 1
:param endpoint_2: Coordinates [y, x] of endpoint 2
:param endpoint_radius: Radius of circle marker
:param endpoint_1_rgb: RGB color of endpoint 1
:param endpoint_2_rgb: RGB color of endpoint 2
:return: RGB image with visualized endpoints
"""
try:
img = np.asarray(img, float)
if not np.any(np.isnan(endpoint_2)):
cv2.circle(img, (int(endpoint_2[1]), int(endpoint_2[0])),
radius=endpoint_radius, color=endpoint_2_rgb, thickness=-1)
cv2.circle(img, (int(endpoint_2[1]), int(endpoint_2[0])),
radius=endpoint_radius+1, color=(0, 0, 0), thickness=2)
if not np.any(np.isnan(endpoint_1)):
cv2.circle(img, (int(endpoint_1[1]), int(endpoint_1[0])),
radius=endpoint_radius, color=endpoint_1_rgb, thickness=-1)
cv2.circle(img, (int(endpoint_1[1]), int(endpoint_1[0])),
radius=endpoint_radius+1, color=(0, 0, 0), thickness=2)
except Exception:
print('Warning: Error in draw_endpoints()')
return img
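# Illustrative sketch (not part of the original script; file names are
# hypothetical): overlay both the mask and the recovered endpoints on a frame
# and write the visualization to disk.
def _save_visualization(fname_image, mask, endpoints, fname_out):
    """Draw mask and endpoints on the image at fname_image and save to fname_out."""
    img = np.asarray(cv2.imread(fname_image), float)
    img = draw_mask(img, mask=mask)
    if endpoints is not None:
        img = draw_endpoints(img, endpoints[0], endpoints[1])
    cv2.imwrite(fname_out, np.asarray(np.clip(img, 0, 255), np.uint8))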
def frames_dict_to_list(d, n_frames=None, default_value=None):
"""
Convert dictionary to list
:param d: Python dictionary
:param n_frames: Minimum length of the output list
:param default_value: Value for list items that are not keys in the dictionary
:return: Python list
"""
| |
element
:param words: Words to speak
:param kwargs: additional attributes
:returns: <P> element
"""
return self.p(words=words, **kwargs)
def phoneme(self, words, alphabet=None, ph=None, **kwargs):
"""
Create a <Phoneme> element
:param words: Words to speak
:param alphabet: Specify the phonetic alphabet
:param ph: Specify the phonetic symbols for pronunciation
:param kwargs: additional attributes
:returns: <Phoneme> element
"""
return self.nest(SsmlPhoneme(words, alphabet=alphabet, ph=ph, **kwargs))
@deprecated_method('phoneme')
def ssml_phoneme(self, words, alphabet=None, ph=None, **kwargs):
"""
Create a <Phoneme> element
:param words: Words to speak
:param alphabet: Specify the phonetic alphabet
:param ph: Specify the phonetic symbols for pronunciation
:param kwargs: additional attributes
:returns: <Phoneme> element
"""
return self.phoneme(words, alphabet=alphabet, ph=ph, **kwargs)
def prosody(self, words=None, volume=None, rate=None, pitch=None, **kwargs):
"""
Create a <Prosody> element
:param words: Words to speak
:param volume: Specify the volume, available values: default, silent, x-soft, soft, medium, loud, x-loud, +ndB, -ndB
:param rate: Specify the rate, available values: x-slow, slow, medium, fast, x-fast, n%
:param pitch: Specify the pitch, available values: default, x-low, low, medium, high, x-high, +n%, -n%
:param kwargs: additional attributes
:returns: <Prosody> element
"""
return self.nest(SsmlProsody(words=words, volume=volume, rate=rate, pitch=pitch, **kwargs))
@deprecated_method('prosody')
def ssml_prosody(self, words=None, volume=None, rate=None, pitch=None,
**kwargs):
"""
Create a <Prosody> element
:param words: Words to speak
:param volume: Specify the volume, available values: default, silent, x-soft, soft, medium, loud, x-loud, +ndB, -ndB
:param rate: Specify the rate, available values: x-slow, slow, medium, fast, x-fast, n%
:param pitch: Specify the pitch, available values: default, x-low, low, medium, high, x-high, +n%, -n%
:param kwargs: additional attributes
:returns: <Prosody> element
"""
return self.prosody(words=words, volume=volume, rate=rate, pitch=pitch, **kwargs)
def s(self, words=None, **kwargs):
"""
Create a <S> element
:param words: Words to speak
:param kwargs: additional attributes
:returns: <S> element
"""
return self.nest(SsmlS(words=words, **kwargs))
@deprecated_method('s')
def ssml_s(self, words=None, **kwargs):
"""
Create a <S> element
:param words: Words to speak
:param kwargs: additional attributes
:returns: <S> element
"""
return self.s(words=words, **kwargs)
def say_as(self, words, interpret_as=None, role=None, **kwargs):
"""
Create a <Say-As> element
:param words: Words to be interpreted
:param interpret-as: Specify the type of words being spoken
:param role: Specify the format of the date when interpret-as is set to date
:param kwargs: additional attributes
:returns: <Say-As> element
"""
return self.nest(SsmlSayAs(words, interpret_as=interpret_as, role=role, **kwargs))
@deprecated_method('say_as')
def ssml_say_as(self, words, interpret_as=None, role=None, **kwargs):
"""
Create a <Say-As> element
:param words: Words to be interpreted
:param interpret-as: Specify the type of words being spoken
:param role: Specify the format of the date when interpret-as is set to date
:param kwargs: additional attributes
:returns: <Say-As> element
"""
return self.say_as(words, interpret_as=interpret_as, role=role, **kwargs)
def sub(self, words, alias=None, **kwargs):
"""
Create a <Sub> element
:param words: Words to be substituted
:param alias: Substitute a different word (or pronunciation) for selected text such as an acronym or abbreviation
:param kwargs: additional attributes
:returns: <Sub> element
"""
return self.nest(SsmlSub(words, alias=alias, **kwargs))
@deprecated_method('sub')
def ssml_sub(self, words, alias=None, **kwargs):
"""
Create a <Sub> element
:param words: Words to be substituted
:param alias: Substitute a different word (or pronunciation) for selected text such as an acronym or abbreviation
:param kwargs: additional attributes
:returns: <Sub> element
"""
return self.sub(words, alias=alias, **kwargs)
def w(self, words=None, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.nest(SsmlW(words=words, role=role, **kwargs))
@deprecated_method('w')
def ssml_w(self, words=None, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.w(words=words, role=role, **kwargs)
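# Illustrative usage sketch (assumes the public VoiceResponse and Say classes
# of the twilio helper library, which wrap the SSML elements defined in this
# module): build a <Say> verb with nested SSML children.
def _ssml_usage_demo():
    from twilio.twiml.voice_response import VoiceResponse
    response = VoiceResponse()
    say = response.say('')
    say.emphasis('Welcome', level='strong')
    say.break_(strength='x-weak', time='100ms')
    say.prosody('to the demo', rate='slow')
    return str(response)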
class SsmlS(TwiML):
""" Adding A Pause Between Sentences in <Say> """
def __init__(self, words=None, **kwargs):
super(SsmlS, self).__init__(**kwargs)
self.name = 's'
if words:
self.value = words
def break_(self, strength=None, time=None, **kwargs):
"""
Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element
"""
return self.nest(SsmlBreak(strength=strength, time=time, **kwargs))
@deprecated_method('break_')
def ssml_break(self, strength=None, time=None, **kwargs):
"""
Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element
"""
return self.break_(strength=strength, time=time, **kwargs)
def emphasis(self, words=None, level=None, **kwargs):
"""
Create a <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.nest(SsmlEmphasis(words=words, level=level, **kwargs))
@deprecated_method('emphasis')
def ssml_emphasis(self, words=None, level=None, **kwargs):
"""
Create a <Emphasis> element
:param words: Words to emphasize
:param level: Specify the degree of emphasis
:param kwargs: additional attributes
:returns: <Emphasis> element
"""
return self.emphasis(words=words, level=level, **kwargs)
def lang(self, words=None, xml_lang=None, **kwargs):
"""
Create a <Lang> element
:param words: Words to speak
:param xml:lang: Specify the language
:param kwargs: additional attributes
:returns: <Lang> element
"""
return self.nest(SsmlLang(words=words, xml_lang=xml_lang, **kwargs))
@deprecated_method('lang')
def ssml_lang(self, words=None, xml_lang=None, **kwargs):
"""
Create a <Lang> element
:param words: Words to speak
:param xml:lang: Specify the language
:param kwargs: additional attributes
:returns: <Lang> element
"""
return self.lang(words=words, xml_lang=xml_lang, **kwargs)
def phoneme(self, words, alphabet=None, ph=None, **kwargs):
"""
Create a <Phoneme> element
:param words: Words to speak
:param alphabet: Specify the phonetic alphabet
:param ph: Specify the phonetic symbols for pronunciation
:param kwargs: additional attributes
:returns: <Phoneme> element
"""
return self.nest(SsmlPhoneme(words, alphabet=alphabet, ph=ph, **kwargs))
@deprecated_method('phoneme')
def ssml_phoneme(self, words, alphabet=None, ph=None, **kwargs):
"""
Create a <Phoneme> element
:param words: Words to speak
:param alphabet: Specify the phonetic alphabet
:param ph: Specify the phonetic symbols for pronunciation
:param kwargs: additional attributes
:returns: <Phoneme> element
"""
return self.phoneme(words, alphabet=alphabet, ph=ph, **kwargs)
def prosody(self, words=None, volume=None, rate=None, pitch=None, **kwargs):
"""
Create a <Prosody> element
:param words: Words to speak
:param volume: Specify the volume, available values: default, silent, x-soft, soft, medium, loud, x-loud, +ndB, -ndB
:param rate: Specify the rate, available values: x-slow, slow, medium, fast, x-fast, n%
:param pitch: Specify the pitch, available values: default, x-low, low, medium, high, x-high, +n%, -n%
:param kwargs: additional attributes
:returns: <Prosody> element
"""
return self.nest(SsmlProsody(words=words, volume=volume, rate=rate, pitch=pitch, **kwargs))
@deprecated_method('prosody')
def ssml_prosody(self, words=None, volume=None, rate=None, pitch=None,
**kwargs):
"""
Create a <Prosody> element
:param words: Words to speak
:param volume: Specify the volume, available values: default, silent, x-soft, soft, medium, loud, x-loud, +ndB, -ndB
:param rate: Specify the rate, available values: x-slow, slow, medium, fast, x-fast, n%
:param pitch: Specify the pitch, available values: default, x-low, low, medium, high, x-high, +n%, -n%
:param kwargs: additional attributes
:returns: <Prosody> element
"""
return self.prosody(words=words, volume=volume, rate=rate, pitch=pitch, **kwargs)
def say_as(self, words, interpret_as=None, role=None, **kwargs):
"""
Create a <Say-As> element
:param words: Words to be interpreted
:param interpret-as: Specify the type of words being spoken
:param role: Specify the format of the date when interpret-as is set to date
:param kwargs: additional attributes
:returns: <Say-As> element
"""
return self.nest(SsmlSayAs(words, interpret_as=interpret_as, role=role, **kwargs))
@deprecated_method('say_as')
def ssml_say_as(self, words, interpret_as=None, role=None, **kwargs):
"""
Create a <Say-As> element
:param words: Words to be interpreted
:param interpret-as: Specify the type of words being spoken
:param role: Specify the format of the date when interpret-as is set to date
:param kwargs: additional attributes
:returns: <Say-As> element
"""
return self.say_as(words, interpret_as=interpret_as, role=role, **kwargs)
def sub(self, words, alias=None, **kwargs):
"""
Create a <Sub> element
:param words: Words to be substituted
:param alias: Substitute a different word (or pronunciation) for selected text such as an acronym or abbreviation
:param kwargs: additional attributes
:returns: <Sub> element
"""
return self.nest(SsmlSub(words, alias=alias, **kwargs))
@deprecated_method('sub')
def ssml_sub(self, words, alias=None, **kwargs):
"""
Create a <Sub> element
:param words: Words to be substituted
:param alias: Substitute a different word (or pronunciation) for selected text such as an acronym or abbreviation
:param kwargs: additional attributes
:returns: <Sub> | |
# write a python program to multiply three numbers
num1 = 1.5
num2 = 6.3
num3 = -2.3
product = num1 * num2 * num3
print(f'Product: {product}')
# write a python function that when given two numbers, would divide the first number by second number and return the quotient and remainder
def divide_first_number_by_second(num1, num2):
return (num1 // num2), (num1 % num2)
# write a python function to return the largest and smallest numbers in the given list and return None if the list is empty
def largest_and_smallest(list_of_nums):
if list_of_nums:
return max(list_of_nums), min(list_of_nums)
else:
return
# write a python function that would read the given input file path and print its contents
def read_and_print_file(filepath):
with open(filepath, "r") as infile:
print( infile.read() )
# write a python program that would print the first n positive integers using a for loop
n = 62
for num in range(n):
print(num)
# write a python function that returns the input list sorted in ascending order
def sort_ascending(list_to_be_sorted):
return sorted(list_to_be_sorted)
# write a python function that returns the input list sorted in descending order
def sort_descending(list_to_be_sorted):
return sorted(list_to_be_sorted, reverse=True)
# write a python function that would return the sum of first n natural numbers, where n is the input
def sum_first_n(n):
return ( n * (n+1) ) // 2
# write a recursive python function that would return the sum of first n natural numbers, where n is the input
def sum_first_n_recursive(n):
if n == 0:
return 0
return sum_first_n_recursive(n-1) + n
# write a python function that would filter a list of dictionaries where a specified key equals given value, list_of_dictionaries, key and value are inputs to this function.
def filter_with_key_value(list_of_dicts, key, value):
return list( filter( lambda x: x.get(key) == value, list_of_dicts ) )
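# Illustrative usage of the function above (the list of dictionaries is made up)
people = [{"name": "ann", "city": "paris"}, {"name": "bob", "city": "oslo"}]
print(filter_with_key_value(people, "city", "paris"))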
# write a recursive python function that takes either a list or tuple as input and reverses the order of its elements
def reverse(seq):
SeqType = type(seq)
emptySeq = SeqType()
if seq == emptySeq:
return emptySeq
restrev = reverse(seq[1:])
first = seq[0:1]
result = restrev + first
return result
# write a python function that returns the square of a given input number
def square(x):
return x**2
# write a python function that performs selection sort on the given list or tuple or string and returns the new sorted sequence
def selection_sort(list_to_be_sorted):
sorted_list = list_to_be_sorted[:]
for i in range(len(sorted_list)):
new_min = sorted_list[i]
new_min_old_place = i
for j in range(i+1, len(sorted_list)):
if new_min > sorted_list[j]:
new_min = sorted_list[j]
new_min_old_place = j
old_val = sorted_list[i]
sorted_list[i] = new_min
sorted_list[new_min_old_place] = old_val
return sorted_list
# write a python program that asks for user input and prints the given input
a = input("User Input")
print(a)
# write a python function that shifts and scales all numbers in the given list by the given mean and standard deviation
def shift_and_scale(list_of_nums, mean, std):
return [ (x-mean) / std for x in list_of_nums ]
# write a python function that takes in a list of sequences and zips each corresponding element from the list into a tuple and returns the list of such tuples
def zip_(list_of_seq):
return list(zip(*list_of_seq))
# write a python program that asks user to guess a number between 1 and 5 and guess it within 3 guesses
print("Please guess a number between 1 and 5 and I will guess within 3 chances!")
guess1 = input("Is it <= 3? enter y/n \n")
if guess1 == "y":
guess2 = input("Is it <= 2? enter y/n \n")
if guess2 == "y":
guess3 = input("Is it 1? enter y/n \n")
if guess3 == "y":
print("Yay! found the number, its 1")
else:
print("Yay! found the number, its 2")
else:
print("Yay! found the number, its 3")
else:
guess2 = input("Is it 4? enter y/n \n")
if guess2 == "y":
print("Yay! found the number, its 4")
else:
print("Yay! found the number, its 5")
# write a python program that would merge two dictionaries by adding the second one into the first
a = {"a": 1, "b": 3}
b = {"c": 1, "d": 3}
a.update(b)
# write a python function that would reverse the given string
def reverse_string(str_to_be_reversed):
return str_to_be_reversed[::-1]
# write a python program that would print "Hello World"
print("Hello World")
# write a python program that would swap variable values
a = 10
b = 15
a, b = b, a
# write a python program that iterates over a dictionary and prints its keys and values
a = {"a":1, "b":2, "c":3, "d":4}
for k, v in a.items():
print(k, v)
# write a python function that would print the ASCII value of a given character
def print_ascii(char):
print(ord(char))
# write a python function that takes in two numbers and returns their HCF
def hcf(num1, num2):
smaller = num1 if num1 < num2 else num2
for i in range(1, smaller+1):
if (num1 % i == 0) and (num2 % i == 0):
hcf = i
return hcf
# write a python function that takes in two numbers and returns their LCM
def lcm(num1, num2):
bigger = num1 if num1 > num2 else num2
while True:
if (bigger % num1 == 0) and (bigger % num2 == 0):
break
bigger += 1
return bigger
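# Illustrative alternative (uses the hcf function defined above): the lcm can
# also be computed as the product of the two numbers divided by their hcf
def lcm_via_hcf(num1, num2):
    return (num1 * num2) // hcf(num1, num2)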
# write a recursive python function to calculate sum of natural numbers upto n, where n is an argument
def recursive_sum(n):
if n <= 1:
return n
else:
return n + recursive_sum(n-1)
# write a python function that deletes the last element of a list and returns the list and the deleted element
def delete_last_element(list_to_be_processed):
deleted_element = list_to_be_processed.pop()
return list_to_be_processed, deleted_element
# write a python function that takes in a list and returns a list containing the squares of the elements of the input list
def square_list_elements(list_to_be_squared):
return list( map(lambda x: x**2, list_to_be_squared) )
# write a python function that returns the square roots of a given number if the square root is an integer, else returns the message "Error - the square root is not an integer"
def find_integer_square_roots(num):
found = False
for k in range(1, (num//2)+1):
if ((k**2)==num):
found = True
break
if not found:
return "Error - the square root is not an integer"
return -k, k
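# Illustrative usage of the function above (inputs are made up)
print(find_integer_square_roots(16))    # (-4, 4)
print(find_integer_square_roots(15))    # Error - the square root is not an integer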
# write a python program that prints out natural numbers less than or equal to the given number using a while loop
input_num = 27
while input_num:
print(input_num)
input_num -= 1
# write a python function that takes two numbers. The function divides the first number by the second and returns the answer. The function returns None, if the second number is 0
def divide(num1, num2):
if num2 == 0:
return
else:
return num1 / num2
# write a python program that uses else with a for loop
seq = "abcde"
for k in seq:
if k == "f":
break
else:
print("f Not Found!")
# write a recursive python function that performs merge sort on the given list or tuple or string and returns the new sorted sequence
def sort_and_merge(l1, l2):
new_list = []
i = 0
j = 0
l1_len = len(l1)
l2_len = len(l2)
while (i <= l1_len-1) and (j <= l2_len-1):
if l1[i] < l2[j]:
new_list.append(l1[i])
i +=1
else:
new_list.append(l2[j])
j +=1
if i <= (l1_len-1):
new_list += l1[i:]
if j <= (l2_len-1):
new_list += l2[j:]
return new_list
def recursive_merge_sort(list_to_be_sorted):
final_list = []
first = 0
last = len(list_to_be_sorted)
if last <= 1:
final_list.extend( list_to_be_sorted )
else:
mid = last // 2
l1 = recursive_merge_sort( list_to_be_sorted[:mid] )
l2 = recursive_merge_sort( list_to_be_sorted[mid:] )
final_list.extend( sort_and_merge( l1, l2 ) )
return final_list
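# Illustrative check of the recursive merge sort above (the input list is made up)
print(recursive_merge_sort([38, 27, 43, 3, 9, 82, 10]))    # [3, 9, 10, 27, 38, 43, 82]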
# Write a function to return the mean of numbers in a list
def cal_mean(num_list:list)->float:
if num_list:
return sum(num_list)/len(num_list)
else:
return None
# Write a function to return the median of numbers in a list
def cal_median(num_list:list)->float:
if num_list:
if len(num_list)%2 != 0:
return sorted(num_list)[len(num_list)//2]
else:
return (sorted(num_list)[int(len(num_list)/2) - 1] + sorted(num_list)[int(len(num_list)/2)])/2
else:
return None
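# Illustrative checks for the two helpers above (the data is made up)
print(cal_mean([1, 2, 3, 4]))      # 2.5
print(cal_median([1, 2, 3, 4]))    # 2.5
print(cal_median([3, 1, 2]))       # 2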
# Write a function to return the area of a triangle by Heron's formula
def cal_triangle_area(a:float,b:float,c:float)->float:
if a or b or c:
s = (a+b+c)/2
if s>a and s>b and s>c:
area = (s*(s-a)*(s-b)*(s-c))**(1/2)
return round(area,2)
else:
return None
return None
# Write a function to return the area of an equilateral triangle
def cal_eq_triangle_area(a:float)->float:
if a:
return (3**(1/2))*(a**2)/4
else:
return None
# Write a function to return the area of a right angle triangle
def cal_rt_triangle_area(base:float,height:float)->float:
if base and height:
return (base*height)/2
else:
return None
# Write a | |
from __future__ import print_function
import json
import time
import math
import sys
import os
import traceback
from hashlib import sha1
from tempfile import NamedTemporaryFile
from multiprocessing import Process, Queue
from itertools import starmap, chain, islice
from boto3.s3.transfer import TransferConfig
try:
# python2
from urlparse import urlparse
from Queue import Full as QueueFull
except:
# python3
from urllib.parse import urlparse
from queue import Full as QueueFull
import click
# s3op can be launched as a stand-alone script. We must set
# PYTHONPATH for the parent Metaflow explicitly.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
# we use Metaflow's parallel_imap_unordered instead of
# multiprocessing.Pool because https://bugs.python.org/issue31886
from metaflow.util import TempDir, url_quote, url_unquote
from metaflow.multicore_utils import parallel_map
from metaflow.datatools.s3util import aws_retry, read_in_chunks
NUM_WORKERS_DEFAULT = 64
DOWNLOAD_FILE_THRESHOLD = 2 * TransferConfig().multipart_threshold
DOWNLOAD_MAX_CHUNK = 2 * 1024 * 1024 * 1024 - 1
class S3Url(object):
def __init__(
self,
bucket,
path,
url,
local,
prefix,
content_type=None,
metadata=None,
range=None,
):
self.bucket = bucket
self.path = path
self.url = url
self.local = local
self.prefix = prefix
self.content_type = content_type
self.metadata = metadata
self.range = range
def __str__(self):
return self.url
# We use error codes instead of Exceptions, which are trickier to
# handle reliably in a multi-process world
ERROR_INVALID_URL = 4
ERROR_NOT_FULL_PATH = 5
ERROR_URL_NOT_FOUND = 6
ERROR_URL_ACCESS_DENIED = 7
ERROR_WORKER_EXCEPTION = 8
ERROR_VERIFY_FAILED = 9
ERROR_LOCAL_FILE_NOT_FOUND = 10
def format_triplet(prefix, url="", local=""):
return u" ".join(url_quote(x).decode("utf-8") for x in (prefix, url, local))
# I can't understand what's the right way to deal
# with boto errors. This function can be replaced
# with better error handling code.
def normalize_client_error(err):
error_code = err.response["Error"]["Code"]
try:
return int(error_code)
except ValueError:
if error_code in ("AccessDenied", "AllAccessDisabled"):
return 403
if error_code == "NoSuchKey":
return 404
return error_code
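# Illustrative sketch (the error payload below is made up, shaped like
# botocore's ClientError.response) of how named S3 error codes are mapped to
# integer status codes by normalize_client_error() above.
def _normalize_client_error_demo():
    class _FakeClientError(Exception):
        def __init__(self, code):
            self.response = {"Error": {"Code": code}}
    assert normalize_client_error(_FakeClientError("AccessDenied")) == 403
    assert normalize_client_error(_FakeClientError("NoSuchKey")) == 404
    assert normalize_client_error(_FakeClientError("404")) == 404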
# S3 worker pool
def worker(result_file_name, queue, mode):
# Interpret mode, it can either be a single op or something like
# info_download or info_upload which implies:
# - for download: we need to return the information as well
# - for upload: we need to not overwrite the file if it exists
modes = mode.split("_")
pre_op_info = False
if len(modes) > 1:
pre_op_info = True
mode = modes[1]
else:
mode = modes[0]
def op_info(url):
try:
head = s3.head_object(Bucket=url.bucket, Key=url.path)
to_return = {
"error": None,
"size": head["ContentLength"],
"content_type": head["ContentType"],
"metadata": head["Metadata"],
"last_modified": head["LastModified"].timestamp(),
}
except client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
to_return = {"error": ERROR_URL_NOT_FOUND, "raise_error": err}
elif error_code == 403:
to_return = {"error": ERROR_URL_ACCESS_DENIED, "raise_error": err}
else:
to_return = {"error": error_code, "raise_error": err}
return to_return
with open(result_file_name, "w") as result_file:
try:
from metaflow.datatools.s3util import get_s3_client
s3, client_error = get_s3_client()
while True:
url, idx = queue.get()
if url is None:
break
if mode == "info":
result = op_info(url)
orig_error = result.get("raise_error", None)
if orig_error:
del result["raise_error"]
with open(url.local, "w") as f:
json.dump(result, f)
elif mode == "download":
tmp = NamedTemporaryFile(dir=".", mode="wb", delete=False)
try:
if url.range:
resp = s3.get_object(
Bucket=url.bucket, Key=url.path, Range=url.range
)
else:
resp = s3.get_object(Bucket=url.bucket, Key=url.path)
sz = resp["ContentLength"]
if not url.range and sz > DOWNLOAD_FILE_THRESHOLD:
# In this case, it is more efficient to use download_file as it
# will download multiple parts in parallel (it does it after
# multipart_threshold)
s3.download_file(url.bucket, url.path, tmp.name)
else:
read_in_chunks(tmp, resp["Body"], sz, DOWNLOAD_MAX_CHUNK)
tmp.close()
os.rename(tmp.name, url.local)
except client_error as err:
tmp.close()
os.unlink(tmp.name)
error_code = normalize_client_error(err)
if error_code == 404:
result_file.write("%d %d\n" % (idx, -ERROR_URL_NOT_FOUND))
continue
elif error_code == 403:
result_file.write(
"%d %d\n" % (idx, -ERROR_URL_ACCESS_DENIED)
)
continue
else:
raise
# TODO specific error message for out of disk space
# If we need the metadata, get it and write it out
if pre_op_info:
with open("%s_meta" % url.local, mode="w") as f:
args = {"size": resp["ContentLength"]}
if resp["ContentType"]:
args["content_type"] = resp["ContentType"]
if resp["Metadata"] is not None:
args["metadata"] = resp["Metadata"]
if resp["LastModified"]:
args["last_modified"] = resp["LastModified"].timestamp()
json.dump(args, f)
# Finally, we push out the size to the result_pipe since
# the size is used for verification and other purposes and
# we want to avoid file operations for this simple process
result_file.write("%d %d\n" % (idx, resp["ContentLength"]))
else:
# This is upload, if we have a pre_op, it means we do not
# want to overwrite
do_upload = False
if pre_op_info:
result_info = op_info(url)
if result_info["error"] == ERROR_URL_NOT_FOUND:
# We only upload if the file is not found
do_upload = True
else:
# No pre-op so we upload
do_upload = True
if do_upload:
extra = None
if url.content_type or url.metadata:
extra = {}
if url.content_type:
extra["ContentType"] = url.content_type
if url.metadata is not None:
extra["Metadata"] = url.metadata
s3.upload_file(url.local, url.bucket, url.path, ExtraArgs=extra)
# We indicate that the file was uploaded
result_file.write("%d %d\n" % (idx, 0))
except:
traceback.print_exc()
sys.exit(ERROR_WORKER_EXCEPTION)
def start_workers(mode, urls, num_workers):
# We start the minimum of len(urls) or num_workers to avoid starting
# workers that will definitely do nothing
num_workers = min(num_workers, len(urls))
queue = Queue(len(urls) + num_workers)
procs = {}
# 1. push sources and destinations to the queue
for idx, elt in enumerate(urls):
queue.put((elt, idx))
# 2. push end-of-queue markers
for i in range(num_workers):
queue.put((None, None))
# 3. Prepare the result structure
sz_results = [None] * len(urls)
# 4. start processes
with TempDir() as output_dir:
for i in range(num_workers):
file_path = os.path.join(output_dir, str(i))
p = Process(target=worker, args=(file_path, queue, mode))
p.start()
procs[p] = file_path
# 5. wait for the processes to finish; we continuously update procs
# to remove all processes that have finished already
while procs:
new_procs = {}
for proc, out_path in procs.items():
proc.join(timeout=1)
if proc.exitcode is not None:
if proc.exitcode != 0:
msg = "Worker process failed (exit code %d)" % proc.exitcode
exit(msg, proc.exitcode)
# Read the output file if all went well
with open(out_path, "r") as out_file:
for line in out_file:
line_split = line.split(" ")
sz_results[int(line_split[0])] = int(line_split[1])
else:
# Put this process back in the processes to check
new_procs[proc] = out_path
procs = new_procs
return sz_results
def process_urls(mode, urls, verbose, num_workers):
if verbose:
print("%sing %d files.." % (mode.capitalize(), len(urls)), file=sys.stderr)
start = time.time()
sz_results = start_workers(mode, urls, num_workers)
end = time.time()
if verbose:
total_size = sum(sz for sz in sz_results if sz is not None and sz > 0)
bw = total_size / (end - start)
print(
"%sed %d files, %s in total, in %d seconds (%s/s)."
% (
mode.capitalize(),
len(urls),
with_unit(total_size),
end - start,
with_unit(bw),
),
file=sys.stderr,
)
return sz_results
# Utility functions
def with_unit(x):
if x > 1024 ** 3:
return "%.1fGB" % (x / 1024.0 ** 3)
elif x > 1024 ** 2:
return "%.1fMB" % (x / 1024.0 ** 2)
elif x > 1024:
return "%.1fKB" % (x / 1024.0)
else:
return "%d bytes" % x
# S3Ops class is just a wrapper for get_size and list_prefix
# required by @aws_retry decorator, which needs the reset_client
# method. Otherwise they would be just stand-alone functions.
class S3Ops(object):
def __init__(self):
self.s3 = None
self.client_error = None
def reset_client(self, hard_reset=False):
from metaflow.datatools.s3util import get_s3_client
if hard_reset or self.s3 is None:
self.s3, self.client_error = get_s3_client()
@aws_retry
def get_info(self, url):
self.reset_client()
try:
head = self.s3.head_object(Bucket=url.bucket, Key=url.path)
return (
True,
url,
[
(
S3Url(
bucket=url.bucket,
path=url.path,
url=url.url,
local=url.local,
prefix=url.prefix,
content_type=head["ContentType"],
metadata=head["Metadata"],
range=url.range,
),
head["ContentLength"],
)
],
)
except self.client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
return False, url, ERROR_URL_NOT_FOUND
elif error_code == 403:
return False, url, ERROR_URL_ACCESS_DENIED
else:
raise
@aws_retry
def list_prefix(self, prefix_url, delimiter=""):
self.reset_client()
url_base = "s3://%s/" % prefix_url.bucket
try:
paginator = self.s3.get_paginator("list_objects_v2")
urls = []
for page in paginator.paginate(
Bucket=prefix_url.bucket, Prefix=prefix_url.path, Delimiter=delimiter
):
# note that an url may be both a prefix and an object
# - the trailing slash is significant in S3
if "Contents" in page:
for key in page.get("Contents", []):
url = url_base + key["Key"]
urlobj = S3Url(
url=url,
bucket=prefix_url.bucket,
path=key["Key"],
local=generate_local_path(url),
prefix=prefix_url.url,
)
urls.append((urlobj, key["Size"]))
if "CommonPrefixes" in page:
# we get CommonPrefixes if Delimiter is a non-empty string
for key in page.get("CommonPrefixes", []):
url = url_base + key["Prefix"]
urlobj = S3Url(
url=url,
bucket=prefix_url.bucket,
path=key["Prefix"],
local=None,
prefix=prefix_url.url,
)
urls.append((urlobj, None))
return True, prefix_url, urls
except self.s3.exceptions.NoSuchBucket:
return False, prefix_url, ERROR_URL_NOT_FOUND
except self.client_error as err:
if err.response["Error"]["Code"] in ("AccessDenied", "AllAccessDisabled"):
return False, prefix_url, ERROR_URL_ACCESS_DENIED
else:
raise
# We want to reuse an s3 client instance over multiple operations.
# This is accomplished | |
<reponame>yutiansut/pika
# Disable warning Missing docstring
# pylint: disable=C0111
# Disable warning Invalid variable name
# pylint: disable=C0103
# Suppress pylint warning about access to protected member
# pylint: disable=W0212
# Suppress no-member: Twisted's reactor methods are not easily discoverable
# pylint: disable=E1101
"""twisted adapter test"""
import unittest
import mock
from nose.twistedtools import reactor, deferred
from twisted.internet import defer, error as twisted_error
from twisted.python.failure import Failure
from pika.adapters.twisted_connection import (
ClosableDeferredQueue, ReceivedMessage, TwistedChannel,
_TwistedConnectionAdapter, TwistedProtocolConnection, _TimerHandle)
from pika import spec
from pika.exceptions import (
AMQPConnectionError, ConsumerCancelled, DuplicateGetOkCallback, NackError,
UnroutableError)
from pika.frame import Method
class TestCase(unittest.TestCase):
"""Imported from twisted.trial.unittest.TestCase
We only want the assertFailure implementation, using the class directly
hides some assertion errors.
"""
def assertFailure(self, d, *expectedFailures):
"""
Fail if C{deferred} does not errback with one of C{expectedFailures}.
Returns the original Deferred with callbacks added. You will need
to return this Deferred from your test case.
"""
def _cb(ignore):
raise self.failureException(
"did not catch an error, instead got %r" % (ignore,))
def _eb(failure):
if failure.check(*expectedFailures):
return failure.value
else:
output = ('\nExpected: %r\nGot:\n%s'
% (expectedFailures, str(failure)))
raise self.failureException(output)
return d.addCallbacks(_cb, _eb)
class ClosableDeferredQueueTestCase(TestCase):
@deferred(timeout=5.0)
def test_put_closed(self):
# Verify that the .put() method errbacks when the queue is closed.
q = ClosableDeferredQueue()
q.closed = RuntimeError("testing")
d = self.assertFailure(q.put(None), RuntimeError)
d.addCallback(lambda e: self.assertEqual(e.args[0], "testing"))
return d
@deferred(timeout=5.0)
def test_get_closed(self):
# Verify that the .get() method errbacks when the queue is closed.
q = ClosableDeferredQueue()
q.closed = RuntimeError("testing")
d = self.assertFailure(q.get(), RuntimeError)
d.addCallback(lambda e: self.assertEqual(e.args[0], "testing"))
return d
def test_close(self):
# Verify that the queue can be closed.
q = ClosableDeferredQueue()
q.close("testing")
self.assertEqual(q.closed, "testing")
self.assertEqual(q.waiting, [])
self.assertEqual(q.pending, [])
def test_close_waiting(self):
# Verify that the deferred waiting for new data are errbacked when the
# queue is closed.
q = ClosableDeferredQueue()
d = q.get()
q.close(RuntimeError("testing"))
self.assertTrue(q.closed)
self.assertEqual(q.waiting, [])
self.assertEqual(q.pending, [])
return self.assertFailure(d, RuntimeError)
def test_close_twice(self):
# If a queue's close() is called twice, it must not crash.
q = ClosableDeferredQueue()
q.close("testing")
self.assertEqual(q.closed, "testing")
q.close("testing")
self.assertEqual(q.closed, "testing")
class TwistedChannelTestCase(TestCase):
def setUp(self):
self.pika_channel = mock.Mock()
self.channel = TwistedChannel(self.pika_channel)
# This is only needed on Python2 for functools.wraps to work.
wrapped = (
"basic_cancel", "basic_get", "basic_qos", "basic_recover",
"exchange_bind", "exchange_unbind", "exchange_declare",
"exchange_delete", "confirm_delivery", "flow",
"queue_bind", "queue_declare", "queue_delete", "queue_purge",
"queue_unbind", "tx_commit", "tx_rollback", "tx_select",
)
for meth_name in wrapped:
getattr(self.pika_channel, meth_name).__name__ = meth_name
def test_repr(self):
self.pika_channel.__repr__ = lambda _s: "<TestChannel>"
self.assertEqual(
repr(self.channel),
"<TwistedChannel channel=<TestChannel>>",
)
@deferred(timeout=5.0)
def test_on_close(self):
# Verify that the channel can be closed and that pending calls and
# consumers are errbacked.
self.pika_channel.add_on_close_callback.assert_called_with(
self.channel._on_channel_closed)
calls = self.channel._calls = [defer.Deferred()]
consumers = self.channel._consumers = {
"test-delivery-tag": mock.Mock()
}
error = RuntimeError("testing")
self.channel._on_channel_closed(None, error)
consumers["test-delivery-tag"].close.assert_called_once_with(error)
self.assertEqual(len(self.channel._calls), 0)
self.assertEqual(len(self.channel._consumers), 0)
return self.assertFailure(calls[0], RuntimeError)
@deferred(timeout=5.0)
def test_basic_consume(self):
# Verify that the basic_consume method works properly.
d = self.channel.basic_consume(queue="testqueue")
self.pika_channel.basic_consume.assert_called_once()
kwargs = self.pika_channel.basic_consume.call_args_list[0][1]
self.assertEqual(kwargs["queue"], "testqueue")
on_message = kwargs["on_message_callback"]
def check_cb(result):
queue, _consumer_tag = result
# Make sure the queue works
queue_get_d = queue.get()
queue_get_d.addCallback(
self.assertEqual,
(self.channel, "testmethod", "testprops", "testbody")
)
# Simulate reception of a message
on_message("testchan", "testmethod", "testprops", "testbody")
return queue_get_d
d.addCallback(check_cb)
# Simulate a ConsumeOk from the server
frame = Method(1, spec.Basic.ConsumeOk(consumer_tag="testconsumertag"))
kwargs["callback"](frame)
return d
@deferred(timeout=5.0)
def test_basic_consume_while_closed(self):
# Verify that a Failure is returned when the channel's basic_consume
# is called and the channel is closed.
error = RuntimeError("testing")
self.channel._on_channel_closed(None, error)
d = self.channel.basic_consume(queue="testqueue")
return self.assertFailure(d, RuntimeError)
@deferred(timeout=5.0)
def test_basic_consume_failure(self):
# Verify that a Failure is returned when the channel's basic_consume
# method fails.
self.pika_channel.basic_consume.side_effect = RuntimeError()
d = self.channel.basic_consume(queue="testqueue")
return self.assertFailure(d, RuntimeError)
@deferred(timeout=5.0)
def test_queue_delete(self):
# Verify that the consumers are cleared when a queue is deleted.
queue_obj = mock.Mock()
self.channel._consumers = {
"test-delivery-tag": queue_obj,
}
self.channel._queue_name_to_consumer_tags["testqueue"] = set([
"test-delivery-tag"
])
self.channel._calls = set()
self.pika_channel.queue_delete.__name__ = "queue_delete"
d = self.channel.queue_delete(queue="testqueue")
self.pika_channel.queue_delete.assert_called_once()
call_kw = self.pika_channel.queue_delete.call_args_list[0][1]
self.assertEqual(call_kw["queue"], "testqueue")
def check(_):
self.assertEqual(len(self.channel._consumers), 0)
queue_obj.close.assert_called_once()
close_call_args = queue_obj.close.call_args_list[0][0]
self.assertEqual(len(close_call_args), 1)
self.assertTrue(isinstance(close_call_args[0], ConsumerCancelled))
d.addCallback(check)
# Simulate a server response
self.assertEqual(len(self.channel._calls), 1)
list(self.channel._calls)[0].callback(None)
return d
@deferred(timeout=5.0)
def test_wrapped_method(self):
# Verify that the wrapped method is called and the result is properly
# transmitted via the Deferred.
self.pika_channel.queue_declare.__name__ = "queue_declare"
d = self.channel.queue_declare(queue="testqueue")
self.pika_channel.queue_declare.assert_called_once()
call_kw = self.pika_channel.queue_declare.call_args_list[0][1]
self.assertIn("queue", call_kw)
self.assertEqual(call_kw["queue"], "testqueue")
self.assertIn("callback", call_kw)
self.assertTrue(callable(call_kw["callback"]))
call_kw["callback"]("testresult")
d.addCallback(self.assertEqual, "testresult")
return d
@deferred(timeout=5.0)
def test_wrapped_method_while_closed(self):
# Verify that a Failure is returned when one of the channel's wrapped
# methods is called and the channel is closed.
error = RuntimeError("testing")
self.channel._on_channel_closed(None, error)
self.pika_channel.queue_declare.__name__ = "queue_declare"
d = self.channel.queue_declare(queue="testqueue")
return self.assertFailure(d, RuntimeError)
@deferred(timeout=5.0)
def test_wrapped_method_multiple_args(self):
# Verify that multiple arguments to the callback are properly converted
# to a tuple for the Deferred's result.
self.pika_channel.queue_declare.__name__ = "queue_declare"
d = self.channel.queue_declare(queue="testqueue")
call_kw = self.pika_channel.queue_declare.call_args_list[0][1]
call_kw["callback"]("testresult-1", "testresult-2")
d.addCallback(self.assertEqual, ("testresult-1", "testresult-2"))
return d
@deferred(timeout=5.0)
def test_wrapped_method_failure(self):
# Verify that exceptions are properly handled in wrapped methods.
error = RuntimeError("testing")
self.pika_channel.queue_declare.__name__ = "queue_declare"
self.pika_channel.queue_declare.side_effect = error
d = self.channel.queue_declare(queue="testqueue")
return self.assertFailure(d, RuntimeError)
def test_method_not_wrapped(self):
# Test that only methods that can be wrapped are wrapped.
result = self.channel.basic_ack()
self.assertFalse(isinstance(result, defer.Deferred))
self.pika_channel.basic_ack.assert_called_once()
def test_passthrough(self):
# Check the simple attribute passthroughs
attributes = (
"channel_number", "connection", "is_closed", "is_closing",
"is_open", "flow_active", "consumer_tags",
)
for name in attributes:
value = "testvalue-{}".format(name)
setattr(self.pika_channel, name, value)
self.assertEqual(getattr(self.channel, name), value)
def test_callback_deferred(self):
# Check that the deferred will be called back.
d = defer.Deferred()
replies = [spec.Basic.CancelOk]
self.channel.callback_deferred(d, replies)
self.pika_channel.add_callback.assert_called_with(
d.callback, replies)
def test_add_on_return_callback(self):
# Check that the registered callback is invoked with the right values.
cb = mock.Mock()
self.channel.add_on_return_callback(cb)
self.pika_channel.add_on_return_callback.assert_called_once()
self.pika_channel.add_on_return_callback.call_args[0][0](
"testchannel", "testmethod", "testprops", "testbody")
cb.assert_called_once()
self.assertEqual(len(cb.call_args[0]), 1)
self.assertEqual(
cb.call_args[0][0],
(self.channel, "testmethod", "testprops", "testbody")
)
@deferred(timeout=5.0)
def test_basic_cancel(self):
# Verify that basic_cancel calls clean up the consumer queue.
queue_obj = mock.Mock()
queue_obj_2 = mock.Mock()
self.channel._consumers["test-consumer"] = queue_obj
self.channel._consumers["test-consumer-2"] = queue_obj_2
self.channel._queue_name_to_consumer_tags.update({
"testqueue": set(["test-consumer"]),
"testqueue-2": set(["test-consumer-2"]),
})
d = self.channel.basic_cancel("test-consumer")
def check(result):
self.assertTrue(isinstance(result, Method))
queue_obj.close.assert_called_once()
self.assertTrue(isinstance(
queue_obj.close.call_args[0][0], ConsumerCancelled))
self.assertEqual(len(self.channel._consumers), 1)
queue_obj_2.close.assert_not_called()
self.assertEqual(
self.channel._queue_name_to_consumer_tags["testqueue"],
set())
d.addCallback(check)
self.pika_channel.basic_cancel.assert_called_once()
self.pika_channel.basic_cancel.call_args[1]["callback"](
Method(1, spec.Basic.CancelOk(consumer_tag="test-consumer"))
)
return d
@deferred(timeout=5.0)
def test_basic_cancel_no_consumer(self):
# Verify that basic_cancel does not crash if there is no consumer.
d = self.channel.basic_cancel("test-consumer")
def check(result):
self.assertTrue(isinstance(result, Method))
d.addCallback(check)
self.pika_channel.basic_cancel.assert_called_once()
self.pika_channel.basic_cancel.call_args[1]["callback"](
Method(1, spec.Basic.CancelOk(consumer_tag="test-consumer"))
)
return d
def test_consumer_cancelled_by_broker(self):
# Verify that server-originating cancels are handled.
self.pika_channel.add_on_cancel_callback.assert_called_with(
self.channel._on_consumer_cancelled_by_broker)
queue_obj = mock.Mock()
self.channel._consumers["test-consumer"] = queue_obj
self.channel._queue_name_to_consumer_tags["testqueue"] = set([
"test-consumer"])
self.channel._on_consumer_cancelled_by_broker(
Method(1, spec.Basic.Cancel(consumer_tag="test-consumer"))
)
queue_obj.close.assert_called_once()
self.assertTrue(isinstance(
queue_obj.close.call_args[0][0], ConsumerCancelled))
self.assertEqual(self.channel._consumers, {})
self.assertEqual(
self.channel._queue_name_to_consumer_tags["testqueue"],
set())
@deferred(timeout=5.0)
def test_basic_get(self):
# Verify that the basic_get method works properly.
d = self.channel.basic_get(queue="testqueue")
self.pika_channel.basic_get.assert_called_once()
kwargs = self.pika_channel.basic_get.call_args_list[0][1]
self.assertEqual(kwargs["queue"], "testqueue")
def check_cb(result):
self.assertEqual(
result,
(self.channel, "testmethod", "testprops", "testbody")
)
d.addCallback(check_cb)
# Simulate reception of a message
kwargs["callback"](
"testchannel", "testmethod", "testprops", "testbody")
return d
def test_basic_get_twice(self):
# Verify that the basic_get method raises the proper exception when
# called twice.
self.channel.basic_get(queue="testqueue")
self.assertRaises(
DuplicateGetOkCallback, self.channel.basic_get, "testqueue")
@deferred(timeout=5.0)
def test_basic_get_empty(self):
# Verify that the basic_get method works when the queue is empty.
self.pika_channel.add_callback.assert_called_with(
self.channel._on_getempty, [spec.Basic.GetEmpty], False)
d = self.channel.basic_get(queue="testqueue")
self.channel._on_getempty("testmethod")
d.addCallback(self.assertIsNone)
return d
def test_basic_nack(self):
# Verify that basic_nack is transmitted properly.
self.channel.basic_nack("testdeliverytag")
self.pika_channel.basic_nack.assert_called_once_with(
delivery_tag="testdeliverytag",
multiple=False, requeue=True)
@deferred(timeout=5.0)
def test_basic_publish(self):
# Verify that basic_publish wraps properly.
args = [object()]
kwargs = {"routing_key": object(), "body": object()}
d = self.channel.basic_publish(*args, **kwargs)
kwargs.update(dict(
# Args are converted to kwargs
exchange=args[0],
# Defaults
immediate=False, mandatory=False, properties=None,
))
self.pika_channel.basic_publish.assert_called_once_with(
**kwargs)
return d
@deferred(timeout=5.0)
def test_basic_publish_closed(self):
# Verify that a Failure is returned when the channel's basic_publish
# is called and the channel is closed.
self.channel._on_channel_closed(None, RuntimeError("testing"))
d = self.channel.basic_publish(None, None, None)
self.pika_channel.basic_publish.assert_not_called()
d = self.assertFailure(d, RuntimeError)
d.addCallback(lambda e: self.assertEqual(e.args[0], "testing"))
return d
def _test_wrapped_func(self, func, kwargs, do_callback=False):
func.assert_called_once()
call_kw = dict(
(key, value) for key, value in
func.call_args[1].items()
if key != "callback"
)
self.assertEqual(kwargs, call_kw)
if do_callback:
func.call_args[1]["callback"](do_callback)
@deferred(timeout=5.0)
def test_basic_qos(self):
# Verify that basic_qos wraps properly.
kwargs = {"prefetch_size": 2}
d = self.channel.basic_qos(**kwargs)
# Defaults
kwargs.update(dict(prefetch_count=0, all_channels=False))
self._test_wrapped_func(self.pika_channel.basic_qos, kwargs, True)
return d
def test_basic_reject(self):
# Verify that basic_reject is transmitted properly.
self.channel.basic_reject("testdeliverytag")
self.pika_channel.basic_reject.assert_called_once_with(
delivery_tag="testdeliverytag", requeue=True)
@deferred(timeout=5.0)
def test_basic_recover(self):
# Verify that basic_recover wraps properly.
d = self.channel.basic_recover()
self._test_wrapped_func(
self.pika_channel.basic_recover, {"requeue": False}, True)
return d
def test_close(self):
# Verify that close wraps properly.
self.channel.close()
self.pika_channel.close.assert_called_once_with(
reply_code=0, reply_text="Normal shutdown")
@deferred(timeout=5.0)
def test_confirm_delivery(self):
# Verify that confirm_delivery works
d = self.channel.confirm_delivery()
self.pika_channel.confirm_delivery.assert_called_once()
self.assertEqual(
self.pika_channel.confirm_delivery.call_args[1][
"ack_nack_callback"],
self.channel._on_delivery_confirmation)
def send_message(_result):
d = | |
############################################################################
## Tool name: BetterBusBuffers - Count Trips at Stops by Route and Direction
## Created by: <NAME>, https://github.com/d-wasserman and <NAME>, Esri
## This tool was developed as part of Transit R&D Efforts from Fehr & Peers.
## Fehr & Peers contributes this tool to the BBB Toolset to further more
## informed planning.
## Last updated: 25 September 2021
############################################################################
''' BetterBusBuffers - Count Trips at Stops by Route and Direction
BetterBusBuffers provides a quantitative measure of access to public transit
in your city by counting the transit trip frequency at various locations.
The Count Trips at Stops by Route and Direction outputs a feature class where
every GTFS stop is duplicated for every route-direction combination that uses
that stop during the analysis time windows. Each point will represent a unique
combination of stop id, route id, and direction id, and the frequency statistics
that relate to each of them for the analyzed time window.
'''
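# Illustrative output layout (example only, using an "AM" time-window prefix):
# each output point is one stop/route/direction combination carrying the stop's
# GTFS attributes plus route_id and direction_id and, per analyzed time window,
# AM_NumTrips, AM_NumTripsPerHr, AM_MaxWaitTime and AM_AvgHeadway fields.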
################################################################################
'''Copyright 2021 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
"""Copyright 2020 Fehr & Peers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
################################################################################
"""Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
################################################################################
import arcpy
import BBB_SharedFunctions
import sqlite3
def runTool(output_stop_file, SQLDbase, time_window_value_table, snap_to_nearest_5_minutes):
def RetrieveFrequencyStatsForStop(stop_id, stoptimedict, start_sec, end_sec):
'''For a given stop, query the dictionary
and return the NumTrips, NumTripsPerHr, MaxWaitTime, and AvgHeadway given a
specific route_id and direction. If snap to nearest five minutes is true, then
this function will return headways snapped to the closest 5 minute interval.'''
# Make a list of stop_times
StopTimesAtThisPoint = []
try:
for trip in stoptimedict[stop_id]:
StopTimesAtThisPoint.append(trip[1])
except KeyError:
pass
StopTimesAtThisPoint.sort()
# Calculate the number of trips
NumTrips = len(StopTimesAtThisPoint)
NumTripsPerHr = round(float(NumTrips) / ((end_sec - start_sec) / 3600), 2)
# Get the max wait time and the average headway
MaxWaitTime = BBB_SharedFunctions.CalculateMaxWaitTime(StopTimesAtThisPoint, start_sec, end_sec)
if snap_to_nearest_5_minutes:
round_to = 5
else:
round_to = None
AvgHeadway = BBB_SharedFunctions.CalculateAvgHeadway(StopTimesAtThisPoint, round_to)
return NumTrips, NumTripsPerHr, MaxWaitTime, AvgHeadway
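# For example (illustrative): 6 departures found for a stop/route/direction in a
# 07:00-09:00 window give NumTrips=6 and NumTripsPerHr=3.0; MaxWaitTime and
# AvgHeadway come from the BBB_SharedFunctions helpers, with AvgHeadway rounded
# to the nearest 5 minutes when snap_to_nearest_5_minutes is enabled.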
# ----- Get input parameters and set things up. -----
# Check software version and fail out quickly if it's not sufficient.
BBB_SharedFunctions.CheckArcVersion(min_version_pro="1.2")
arcpy.AddMessage("Reading data...")
# Connect to SQL database of preprocessed GTFS from Step 1
conn = BBB_SharedFunctions.conn = sqlite3.connect(SQLDbase)
c = BBB_SharedFunctions.c = conn.cursor()
# Store frequencies if relevant
frequencies_dict = BBB_SharedFunctions.MakeFrequenciesDict()
# Get unique route_id/direction_id pairs and calculate the trips used in each
# Some GTFS datasets use the same route_id to identify trips traveling in
# either direction along a route. Others identify it as a different route.
# We will consider each direction separately if there is more than one.
trip_route_dict = {} # {(route_id, direction_id): [(trip_id, service_id),..]}
triproutefetch = '''SELECT DISTINCT route_id,direction_id FROM trips;'''
c.execute(triproutefetch)
for rtpair in c.fetchall():
route_id = rtpair[0]
direction_id = rtpair[1]
if str(direction_id).strip() == "": # Handle blanks
direction_id = None
# Get list of trips
# Ignore direction if this route doesn't have a direction
if direction_id is not None and str(direction_id).strip():
triproutefetch = '''
SELECT trip_id, service_id FROM trips
WHERE route_id = '{0}' AND direction_id = {1};'''.format(route_id, direction_id)
else:
triproutefetch = '''
SELECT trip_id, service_id FROM trips
WHERE route_id = '{0}';'''.format(route_id)
c.execute(triproutefetch)
triproutelist = c.fetchall()
key = (route_id, direction_id)
trip_route_dict[key] = triproutelist
# ----- For each time window, calculate the stop frequency -----
final_stop_freq_dict = {} # {(stop_id, route_id, direction_id): {prefix: (NumTrips, NumTripsPerHour, MaxWaitTimeSec, AvgHeadwayMin)}}
# The time_window_value_table will be a list of nested lists of strings like:
# [[Weekday name or YYYYMMDD date, HH:MM, HH:MM, Departures / Arrivals, Prefix], [], ...]
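# For example (illustrative values only):
# [["Wednesday", "07:00", "09:00", "Departures", "AM"],
#  ["20210925", "16:00", "18:00", "Arrivals", "PM"]]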
for time_window in time_window_value_table:
# Prefix/identifier associated with this time window
prefix = time_window[4]
arcpy.AddMessage("Calculating statistics for time window %s..." % prefix)
# Clean up date and determine whether it's a date or a weekday
Specific, day = BBB_SharedFunctions.CheckSpecificDate(time_window[0])
# Convert times to seconds
start_time = time_window[1]
end_time = time_window[2]
if not start_time:
start_time = "00:00"
if not end_time:
end_time = "23:59"
start_sec, end_sec = BBB_SharedFunctions.ConvertTimeWindowToSeconds(start_time, end_time)
# Clean up arrival/departure time choice
DepOrArr = BBB_SharedFunctions.CleanUpDepOrArr(time_window[3])
# Get the trips running in this time window for each route/direction pair
# Get the service_ids serving the correct days
serviceidlist, serviceidlist_yest, serviceidlist_tom = \
BBB_SharedFunctions.GetServiceIDListsAndNonOverlaps(day, start_sec, end_sec, DepOrArr, Specific)
# Retrieve the stop_times for the time window broken out by route/direction
for rtdirpair in trip_route_dict:
# Get trips running with these service_ids
trip_serv_list = trip_route_dict[rtdirpair]
triplist = []
for tripserv in trip_serv_list:
# Only keep trips running on the correct day
if tripserv[1] in serviceidlist or tripserv[1] in serviceidlist_tom or \
tripserv[1] in serviceidlist_yest:
triplist.append(tripserv[0])
# Get the stop_times that occur during this time window for these trips
try:
stoptimedict = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
start_sec, end_sec, DepOrArr, triplist, "today", frequencies_dict)
except KeyError: # No trips
pass
try:
stoptimedict_yest = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
start_sec, end_sec, DepOrArr, triplist, "yesterday", frequencies_dict)
except KeyError: # No trips
pass
try:
stoptimedict_tom = BBB_SharedFunctions.GetStopTimesForStopsInTimeWindow(
start_sec, end_sec, DepOrArr, triplist, "tomorrow", frequencies_dict)
except KeyError: # No trips
pass
# Combine the three dictionaries into one master
for stop in stoptimedict_yest:
stoptimedict[stop] = stoptimedict.setdefault(stop, []) + stoptimedict_yest[stop]
for stop in stoptimedict_tom:
stoptimedict[stop] = stoptimedict.setdefault(stop, []) + stoptimedict_tom[stop]
for stop in stoptimedict.keys():
# Get Stop-Route-Dir Frequencies by time period
vals = RetrieveFrequencyStatsForStop(stop, stoptimedict, start_sec, end_sec)
key = (stop, rtdirpair[0], rtdirpair[1],)
if key not in final_stop_freq_dict:
final_stop_freq_dict[key] = {prefix: vals}
else:
final_stop_freq_dict[key][prefix] = vals
# ----- Write the stops and stats to the output feature class -----
arcpy.AddMessage("Writing outputs...")
# Make the basic feature class for stops with correct gtfs fields
with arcpy.EnvManager(overwriteOutput=True):
output_coords = BBB_SharedFunctions.CreateStopsFeatureClass(output_stop_file)
# Add fields specific to this tool's outputs
arcpy.management.AddField(output_stop_file, 'route_id', "TEXT")
arcpy.management.AddField(output_stop_file, 'direction_id', "SHORT")
# Create fields for stats for each time window using prefix
base_field_names = ['_NumTrips', '_NumTripsPerHr', '_MaxWaitTime', '_AvgHeadway']
new_fields = []
for time_window in time_window_value_table:
for base_field in base_field_names:
new_field = time_window[4] + base_field
new_fields.append(new_field)
arcpy.management.AddField(output_stop_file, new_field, "DOUBLE")
# Get the stop info from the GTFS SQL file
StopTable = BBB_SharedFunctions.GetStopsData()
stop_dict = {stop[0]: stop for stop in StopTable}
# Make a dictionary to track whether we have inserted all stops at least once into the output
used_stops = {stop[0]: False for stop in StopTable}
# Store stop geometries in dictionary so they can be inserted multiple times without recalculating
stop_geoms = {stop[0]: BBB_SharedFunctions.MakeStopGeometry(stop[4], stop[5], output_coords) for stop in StopTable}
# Add the stops with stats to the feature class
fields = [
"SHAPE@", "stop_id", "stop_code", "stop_name", "stop_desc", "zone_id", "stop_url", "location_type",
"parent_station", "route_id", "direction_id"
] + new_fields
with arcpy.da.InsertCursor(output_stop_file, fields) as cur3:
# Iterate over all unique stop, route_id, direction_id groups and insert values
for key in sorted(final_stop_freq_dict.keys(), key=lambda x: (x[0], x[1], x[2] if x[2] is not None else -1)):
stop_id = key[0]
used_stops[stop_id] = True
route_id = key[1]
direction_id = key[2]
stop_data = stop_dict[stop_id]
# Schema of StopTable
## 0 - stop_id
## 1 - stop_code
## 2 - | |
{'REGISTER', 'UNDO'}
def execute(self, context):
ob = get_last_object() # returns tuple with list and last cloth objects or None
if not ob: return {'CANCELLED'}
for i, pin in reversed(list(enumerate(ob[1].mclo.pins))):
bpy.data.objects.remove(pin.hook)
ob[1].mclo.pins.remove(i)
bpy.context.scene.objects.active = ob[1]
return {'FINISHED'}
class SelectPins(bpy.types.Operator):
"""Select modeling cloth pins for current object"""
bl_idname = "object.select_modeling_cloth_pins"
bl_label = "Select Modeling Cloth Pins"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
ob = get_last_object() # returns list and last cloth objects or None
if not ob: return {'CANCELLED'}
#bpy.ops.object.select_all(action='DESELECT')
for pin in ob[1].mclo.pins:
pin.hook.select = True
return {'FINISHED'}
class PinSelected(bpy.types.Operator):
"""Add pins to verts selected in edit mode"""
bl_idname = "object.modeling_cloth_pin_selected"
bl_label = "Modeling Cloth Pin Selected"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
ob = bpy.context.object
bpy.ops.object.mode_set(mode='OBJECT')
sel = [i.index for i in ob.data.vertices if i.select]
matrix = ob.matrix_world.copy()
for v in sel:
e = bpy.data.objects.new('modeling_cloth_pin', None)
bpy.context.scene.objects.link(e)
if ob.active_shape_key is None:
closest = matrix * ob.data.vertices[v].co# * matrix
else:
closest = matrix * ob.active_shape_key.data[v].co# * matrix
e.location = closest #* matrix
e.show_x_ray = True
e.select = True
e.empty_draw_size = .1
pin = ob.mclo.pins.add()
pin.vertex_id = v
pin.hook = e
ob.select = False
bpy.ops.object.mode_set(mode='EDIT')
return {'FINISHED'}
class GrowSource(bpy.types.Operator):
"""Grow Source Shape"""
bl_idname = "object.modeling_cloth_grow"
bl_label = "Modeling Cloth Grow"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
scale_source(1.02)
return {'FINISHED'}
class ShrinkSource(bpy.types.Operator):
"""Shrink Source Shape"""
bl_idname = "object.modeling_cloth_shrink"
bl_label = "Modeling Cloth Shrink"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
scale_source(0.98)
return {'FINISHED'}
class ResetShapes(bpy.types.Operator):
"""Reset Shapes"""
bl_idname = "object.modeling_cloth_reset"
bl_label = "Modeling Cloth Reset"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
reset_shapes()
return {'FINISHED'}
class AddVirtualSprings(bpy.types.Operator):
"""Add Virtual Springs Between All Selected Vertices"""
bl_idname = "object.modeling_cloth_add_virtual_spring"
bl_label = "Modeling Cloth Add Virtual Spring"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
add_remove_virtual_springs()
return {'FINISHED'}
class RemoveVirtualSprings(bpy.types.Operator):
"""Remove Virtual Springs Between All Selected Vertices"""
bl_idname = "object.modeling_cloth_remove_virtual_spring"
bl_label = "Modeling Cloth Remove Virtual Spring"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
add_remove_virtual_springs(remove=True)
return {'FINISHED'}
class ModelingClothObject(bpy.types.PropertyGroup):
ob = PointerProperty(type=bpy.types.Object)
class ModelingClothCollider(bpy.types.PropertyGroup):
ob = PointerProperty(type=bpy.types.Object)
class ModelingClothGlobals(bpy.types.PropertyGroup):
cloth_pointers = CollectionProperty(
name="Modeling Cloth Objects",
description = 'List of cloth objects for quick pointers',
type=ModelingClothObject)
collider_pointers = CollectionProperty(
name="Modeling Cloth Colliders",
description = 'List of collider objects for quick pointers',
type=ModelingClothCollider)
drag_alert = BoolProperty(default=False)
pin_alert = BoolProperty(default=False)
last_object = PointerProperty(type=bpy.types.Object)
class ModelingClothPinObject(bpy.types.PropertyGroup):
vertex_id = IntProperty(default=-1)
hook = PointerProperty(type=bpy.types.Object)
class ApplyClothToMesh(bpy.types.Operator):
"""Apply cloth effects to mesh for export."""
bl_idname = "object.modeling_cloth_apply_cloth_to_mesh"
bl_label = "Modeling Cloth Remove Virtual Spring"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
ob = get_last_object()[1]
v_count = len(ob.data.vertices)
co = np.zeros(v_count * 3, dtype=np.float32)
ob.data.shape_keys.key_blocks['modeling cloth key'].data.foreach_get('co', co)
ob.data.shape_keys.key_blocks['Basis'].data.foreach_set('co', co)
ob.data.shape_keys.key_blocks['Basis'].mute = True
ob.data.shape_keys.key_blocks['Basis'].mute = False
ob.data.vertices.foreach_set('co', co)
ob.data.update()
return {'FINISHED'}
class ModelingClothVirtualSpring(bpy.types.PropertyGroup):
vertex_id_1 = IntProperty(default=-1)
vertex_id_2 = IntProperty(default=-1)
class ModelingClothObjectProps(bpy.types.PropertyGroup):
enable = BoolProperty(name="Enable Modeling Cloth",
description="For toggling modeling cloth",
default=False, update=enable_cloth)
floor = BoolProperty(name="Modeling Cloth Floor",
description="Stop at floor",
default=False)
# handler type ----->>>
scene_update = BoolProperty(name="Modeling Cloth Continuous Update",
description="Choose continuous update",
default=False, update=manage_continuous_handler)
frame_update = BoolProperty(name="Modeling Cloth Handler Animation Update",
description="Choose animation update",
default=False, update=manage_animation_handler)
auto_reset = BoolProperty(name="Modeling Cloth Reset at Frame 1",
description="Automatically reset if the current frame number is 1 or less",
default=False)#, update=manage_handlers)
# ------------------>>>
noise = FloatProperty(name="Modeling Cloth Noise",
description="Set the noise strength",
default=0.001, precision=4, min=0, max=1, update=refresh_noise)
noise_decay = FloatProperty(name="Modeling Cloth Noise Decay",
description="Multiply the noise by this value each iteration",
default=0.99, precision=4, min=0, max=1)#, update=refresh_noise_decay)
# spring forces ------------>>>
spring_force = FloatProperty(name="Modeling Cloth Spring Force",
description="Set the spring force",
default=1.0, precision=4, min=0, max=2.5)#, update=refresh_noise)
push_springs = FloatProperty(name="Modeling Cloth Push Spring Force",
description="Set the push spring force",
default=1.0, precision=4, min=0, max=2.5)#, update=refresh_noise)
bend_stiff = FloatProperty(name="Modeling Cloth Bend Spring Force",
description="Set the bend spring force",
default=0.0, precision=4, min=0, max=10, soft_max=1)#, update=refresh_noise)
# -------------------------->>>
gravity = FloatProperty(name="Modeling Cloth Gravity",
description="Modeling cloth gravity",
default=0.0, precision=4, soft_min=-10, soft_max=10, min=-1000, max=1000)
iterations = IntProperty(name="Iterations",
description="How stiff the cloth is",
default=2, min=1, max=500)#, update=refresh_noise_decay)
velocity = FloatProperty(name="Velocity",
description="Cloth keeps moving",
default=.98, min= -200, max=200, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)
# Wind. Note: wind should be measured against the normals and drop to zero when the normal component is zero. Squaring should work.
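# A minimal sketch of that idea (an assumption, not necessarily what the solver
# currently does): per-vertex force ~ wind_vec * dot(vertex_normal, wind_dir)**2,
# so surfaces edge-on to the wind receive nothing and the falloff stays smooth.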
wind_x = FloatProperty(name="Wind X",
description="Not the window cleaner",
default=0, min= -10, max=10, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)
wind_y = FloatProperty(name="Wind Y",
description="Y? Because wind is cool",
default=0, min= -10, max=10, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)
wind_z = FloatProperty(name="Wind Z",
description="It's windzee outzide",
default=0, min= -10, max=10, soft_min= -1, soft_max=1)#, update=refresh_noise_decay)
turbulence = FloatProperty(name="Wind Turbulence",
description="Add Randomness to wind",
default=0, min=0, max=10, soft_min= 0, soft_max=1)#, update=refresh_noise_decay)
# self collision ----->>>
self_collision = BoolProperty(name="Modeling Cloth Self Collision",
description="Toggle self collision",
default=False)#, update=collision_data_update)
# self_collision_force = FloatProperty(name="recovery force",
# description="Self colide faces repel",
# default=.17, precision=4, min= -1.1, max=1.1, soft_min= 0, soft_max=1)
self_collision_margin = FloatProperty(name="Margin",
description="Self colide faces margin",
default=.08, precision=4, min= -1, max=1, soft_min= 0, soft_max=1)
# self_collision_cy_size = FloatProperty(name="Cylinder size",
# description="Self colide faces cylinder size",
# default=1, precision=4, min= 0, max=4, soft_min= 0, soft_max=1.5)
# ---------------------->>>
# extras ------->>>
inflate = FloatProperty(name="inflate",
description="add force to vertex normals",
default=0, precision=4, min= -10, max=10, soft_min= -1, soft_max=1)
sew = FloatProperty(name="sew",
description="add force to vertex normals",
default=0, precision=4, min= -10, max=10, soft_min= -1, soft_max=1)
# -------------->>>
# external collisions ------->>>
object_collision = BoolProperty(name="Modeling Cloth Object Collision",
description="Detect and collide with this object",
default=False, update=collision_object_update)
#collision_animated = bpy.props.BoolProperty(name="Modeling Cloth Collsion Animated",
#description="Treat collide object as animated. (turn off for speed on static objects)",
#default=True)#, update=collision_object_update)
object_collision_detect = BoolProperty(name="Modeling Cloth Object Collision Detect",
description="Detect collision objects",
default=True, update=cloth_object_update)
object_collision_outer_margin = FloatProperty(name="Modeling Cloth Outer Margin",
description="Collision margin on positive normal side of face",
default=0.04, precision=4, min=0, max=100, soft_min=0, soft_max=1000)
object_collision_inner_margin = FloatProperty(name="Modeling Cloth Inner Margin",
description="Collision margin on negative normal side of face",
default=0.08, precision=4, min=0, max=100, soft_min=0, soft_max=1000)
# ---------------------------->>>
# more collision stuff ------->>>
grid_size = IntProperty(name="Modeling Cloth Grid Size",
description="Max subdivisions for the dynamic broad phase grid",
default=10, min=0, max=1000, soft_min=0, soft_max=1000)
# Not for manual editing ----->>>
waiting = BoolProperty(name='Pause Cloth Update',
default=False)
clicked = BoolProperty(name='Click for drag event',
default=False)
pins = CollectionProperty(name="Modeling Cloth Pins",
type=ModelingClothPinObject)
virtual_springs = CollectionProperty(name="Modeling Cloth Virtual Springs",
type=ModelingClothVirtualSpring)
def create_properties():
bpy.types.Scene.mclo = PointerProperty(type=ModelingClothGlobals)
bpy.types.Object.mclo = PointerProperty(type=ModelingClothObjectProps)
# property dictionaries
bpy.types.Scene.modeling_cloth_data_set = {}
bpy.types.Scene.modeling_cloth_data_set_colliders = {}
bpy.types.Scene.modeling_cloth_data_set_extra = {}
def remove_properties():
'''Drives to the grocery store and buys a sandwich'''
# No need to remove properties because yolo
pass
@persistent
def refresh_cloth_data(scene):
# Create new data based on available clothes and colliders
scene = bpy.context.scene
for cp in scene.mclo.cloth_pointers:
if cp.ob:
create_cloth_data(cp.ob)
for cp in scene.mclo.collider_pointers:
if cp.ob:
create_collider_data(cp.ob)
class ModelingClothPanel(bpy.types.Panel):
"""Modeling Cloth Panel"""
bl_label = "Modeling Cloth Panel"
bl_idname = "Modeling Cloth"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = "Extended Tools"
#gt_show = True
def draw(self, context):
scene = context.scene
status = False
layout = self.layout
# tools
col = layout.column(align=True)
col.label(text="Tools")
col.operator("object.modeling_cloth_create_sew_lines", text="Sew Lines", icon="MOD_UVPROJECT")
col.operator("object.modeling_cloth_apply_cloth_to_mesh", text="Apply to Mesh", icon="FILE_TICK")
# modeling cloth
col = layout.column(align=True)
col.label(text="Modeling Cloth")
ob = bpy.context.object
cloths = [i for i in bpy.data.objects if i.mclo.enable] # so we can select an empty and keep the settings menu up
if len(cloths) > 0:
status = scene.mclo.pin_alert
if ob is not None:
if ob.type != 'MESH' or status:
ob = scene.mclo.last_object
if ob is not None:
if ob.type == 'MESH':
col.prop(ob.mclo ,"enable", text="Modeling Cloth", icon='SURFACE_DATA')
if ob.mclo.enable:
col.prop(ob.mclo ,"self_collision", text="Self Collision", icon='PHYSICS')
if ob.mclo.self_collision:
col.prop(ob.mclo ,"self_collision_margin", text="Self Margin")#, icon='PLAY')
#pause = 'PAUSE'
#if ob.mclo.pause:
# pause = 'PLAY'
col.prop(ob.mclo ,"object_collision", text="Collider", icon="STYLUS_PRESSURE")
#if ob.mclo.object_collision:
#col.prop(ob.mclo ,"collision_animated", text="Animated", icon="POSE_DATA")
if ob.mclo.object_collision:
col.prop(ob.mclo ,"object_collision_outer_margin", text="Outer Margin", | |
excl += Outputs.template_nonsmooth
if 'raw' not in cfg.post_processing['z-scoring']['output']:
excl += Outputs.native_raw
excl += Outputs.template_raw
if not cfg.pipeline_setup['output_directory']['write_debugging_outputs']:
substring_excl.append(['desc-reginput', 'bold'])
excl += Outputs.debugging
for resource in self.rpool.keys():
if resource not in Outputs.any:
continue
if resource in excl:
continue
drop = False
for substring_list in substring_excl:
bool_list = []
for substring in substring_list:
if substring in resource:
bool_list.append(True)
else:
bool_list.append(False)
for item in bool_list:
if not item:
break
else:
drop = True
if drop:
break
if drop:
continue
subdir = 'other'
if resource in Outputs.anat:
subdir = 'anat'
#TODO: get acq- etc.
elif resource in Outputs.func:
subdir = 'func'
#TODO: other stuff like acq- etc.
for pipe_idx in self.rpool[resource]:
unique_id = self.get_name()
out_dir = cfg.pipeline_setup['output_directory']['path']
pipe_name = cfg.pipeline_setup['pipeline_name']
container = os.path.join(f'cpac_{pipe_name}', unique_id)
filename = f'{unique_id}_{resource}'
out_path = os.path.join(out_dir, container, subdir, filename)
out_dct = {
'unique_id': unique_id,
'out_dir': out_dir,
'container': container,
'subdir': subdir,
'filename': filename,
'out_path': out_path
}
self.rpool[resource][pipe_idx]['out'] = out_dct
# TODO: have to link the pipe_idx's here. and call up 'desc-preproc_T1w' from a Sources in a json and replace. here.
# TODO: can do the pipeline_description.json variants here too!
for resource in self.rpool.keys():
if resource not in Outputs.any:
continue
if resource in excl:
continue
drop = False
for substring_list in substring_excl:
bool_list = []
for substring in substring_list:
if substring in resource:
bool_list.append(True)
else:
bool_list.append(False)
for item in bool_list:
if not item:
break
else:
drop = True
if drop:
break
if drop:
continue
num_variant = 0
if len(self.rpool[resource]) == 1:
num_variant = ""
for pipe_idx in self.rpool[resource]:
pipe_x = self.get_pipe_number(pipe_idx)
try:
num_variant += 1
except TypeError:
pass
json_info = self.rpool[resource][pipe_idx]['json']
out_dct = self.rpool[resource][pipe_idx]['out']
try:
del json_info['subjson']
except KeyError:
pass
if out_dct['subdir'] == 'other' and not all:
continue
unique_id = out_dct['unique_id']
if num_variant:
for key in out_dct['filename'].split('_'):
if 'desc-' in key:
out_dct['filename'] = out_dct['filename'
].replace(key, f'{key}-{num_variant}')
resource_idx = resource.replace(key,
f'{key}-{num_variant}')
break
else:
suff = resource.split('_')[-1]
newdesc_suff = f'desc-{num_variant}_{suff}'
resource_idx = resource.replace(suff,
newdesc_suff)
else:
resource_idx = resource
id_string = pe.Node(Function(input_names=['unique_id',
'resource',
'scan_id',
'atlas_id',
'fwhm'],
output_names=['out_filename'],
function=create_id_string),
name=f'id_string_{resource_idx}_{pipe_x}')
id_string.inputs.unique_id = unique_id
id_string.inputs.resource = resource_idx
# grab the iterable scan ID
if out_dct['subdir'] == 'func':
node, out = self.rpool['scan']["['scan:func_ingress']"][
'data']
wf.connect(node, out, id_string, 'scan_id')
# grab the FWHM if smoothed
for tag in resource.split('_'):
if 'desc-' in tag and '-sm' in tag:
fwhm_idx = pipe_idx.replace(f'{resource}:', 'fwhm:')
try:
node, out = self.rpool['fwhm'][fwhm_idx]['data']
wf.connect(node, out, id_string, 'fwhm')
except KeyError:
# smoothing was not done for this resource in the
# engine.py smoothing
pass
break
atlas_suffixes = ['timeseries', 'correlations', 'statmap']
# grab the iterable atlas ID
if resource.split('_')[-1] in atlas_suffixes:
atlas_idx = pipe_idx.replace(resource, 'atlas_name')
# need the single quote and the colon inside the double
# quotes - it's the encoded pipe_idx
#atlas_idx = new_idx.replace(f"'{temp_rsc}:",
# "'atlas_name:")
if atlas_idx in self.rpool['atlas_name']:
node, out = self.rpool['atlas_name'][atlas_idx][
'data']
wf.connect(node, out, id_string, 'atlas_id')
elif 'atlas-' in resource:
for tag in resource.split('_'):
if 'atlas-' in tag:
atlas_id = tag.replace('atlas-', '')
id_string.inputs.atlas_id = atlas_id
else:
warnings.warn(str(
LookupError("\n[!] No atlas ID found for "
f"{out_dct['filename']}.\n")))
nii_name = pe.Node(Rename(), name=f'nii_{resource_idx}_'
f'{pipe_x}')
nii_name.inputs.keep_ext = True
wf.connect(id_string, 'out_filename',
nii_name, 'format_string')
node, out = self.rpool[resource][pipe_idx]['data']
wf.connect(node, out, nii_name, 'in_file')
write_json_imports = ['import os', 'import json']
write_json = pe.Node(Function(input_names=['json_data',
'filename'],
output_names=['json_file'],
function=write_output_json,
imports=write_json_imports),
name=f'json_{resource_idx}_{pipe_x}')
write_json.inputs.json_data = json_info
wf.connect(id_string, 'out_filename', write_json, 'filename')
ds = pe.Node(DataSink(), name=f'sinker_{resource_idx}_'
f'{pipe_x}')
ds.inputs.parameterization = False
ds.inputs.base_directory = out_dct['out_dir']
ds.inputs.encrypt_bucket_keys = cfg.pipeline_setup[
'Amazon-AWS']['s3_encryption']
ds.inputs.container = out_dct['container']
if cfg.pipeline_setup['Amazon-AWS'][
'aws_output_bucket_credentials']:
ds.inputs.creds_path = cfg.pipeline_setup['Amazon-AWS'][
'aws_output_bucket_credentials']
wf.connect(nii_name, 'out_file',
ds, f'{out_dct["subdir"]}.@data')
wf.connect(write_json, 'json_file',
ds, f'{out_dct["subdir"]}.@json')
class NodeBlock(object):
def __init__(self, node_block_functions):
if not isinstance(node_block_functions, list):
node_block_functions = [node_block_functions]
self.node_blocks = {}
for node_block_function in node_block_functions: # <---- sets up the NodeBlock object in case you gave it a list of node blocks instead of a single one - for option forking.
self.input_interface = []
if isinstance(node_block_function, tuple):
self.input_interface = node_block_function[1]
node_block_function = node_block_function[0]
if not isinstance(self.input_interface, list):
self.input_interface = [self.input_interface]
init_dct = self.grab_docstring_dct(node_block_function.__doc__)
name = init_dct['name']
self.name = name
self.node_blocks[name] = {}
if self.input_interface:
for interface in self.input_interface:
for orig_input in init_dct['inputs']:
if isinstance(orig_input, tuple):
list_tup = list(orig_input)
if interface[0] in list_tup:
list_tup.remove(interface[0])
list_tup.append(interface[1])
init_dct['inputs'].remove(orig_input)
init_dct['inputs'].append(tuple(list_tup))
else:
if orig_input == interface[0]:
init_dct['inputs'].remove(interface[0])
init_dct['inputs'].append(interface[1])
for key, val in init_dct.items():
self.node_blocks[name][key] = val
self.node_blocks[name]['block_function'] = node_block_function
#TODO: fix/replace below
self.outputs = {}
for out in init_dct['outputs']:
self.outputs[out] = None
self.options = ['base']
if 'options' in init_dct:
self.options = init_dct['options']
def get_name(self):
return self.name
def grab_docstring_dct(self, fn_docstring):
init_dct_schema = ['name', 'config', 'switch', 'option_key',
'option_val', 'inputs', 'outputs']
if 'Node Block:' in fn_docstring:
fn_docstring = fn_docstring.split('Node Block:')[1]
fn_docstring = fn_docstring.lstrip().replace('\n', '')
dct = ast.literal_eval(fn_docstring)
for key in init_dct_schema:
if key not in dct.keys():
raise Exception('\n[!] Developer info: At least one of the '
'required docstring keys in your node block '
'is missing.\n\nNode block docstring keys:\n'
f'{init_dct_schema}\n\nYou provided:\n'
f'{dct.keys()}\n\nDocstring:\n{fn_docstring}'
'\n\n')
return dct
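# A hypothetical node block docstring this parser would accept (names are made
# up for illustration; the part after "Node Block:" must be a dict literal with
# every key in init_dct_schema):
# '''Node Block:
# {"name": "example_block", "config": ["example_section"], "switch": ["run"],
#  "option_key": "None", "option_val": "None",
#  "inputs": ["desc-preproc_T1w"], "outputs": ["desc-example_T1w"]}
# '''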
def check_null(self, val):
if isinstance(val, str):
val = None if val.lower() == 'none' else val
return val
def check_output(self, outputs, label, name):
if label not in outputs:
raise Exception('\n[!] Output name in the block function does '
'not match the outputs list in Node Block '
f'{name}\n')
def grab_tiered_dct(self, cfg, key_list):
cfg_dct = cfg
for key in key_list:
cfg_dct = cfg_dct.__getitem__(key)
return cfg_dct
def connect_block(self, wf, cfg, rpool):
all_opts = []
for name, block_dct in self.node_blocks.items():
opts = []
config = self.check_null(block_dct['config'])
option_key = self.check_null(block_dct['option_key'])
option_val = self.check_null(block_dct['option_val'])
if option_key and option_val:
if not isinstance(option_key, list):
option_key = [option_key]
if not isinstance(option_val, list):
option_val = [option_val]
if config:
key_list = config + option_key
else:
key_list = option_key
if 'USER-DEFINED' in option_val:
# load custom config data into each 'opt'
opts = self.grab_tiered_dct(cfg, key_list)
else:
for option in option_val:
try:
if option in self.grab_tiered_dct(cfg, key_list): # <---- goes over the option_vals in the node block docstring, and checks if the user's pipeline config included it in the forking list
opts.append(option)
except AttributeError as err:
raise Exception(f"{err}\nNode Block: {name}")
if opts is None:
opts = [opts]
elif option_key and not option_val:
# enables multiple config forking entries
if not isinstance(option_key[0], list):
raise Exception(f'[!] The option_key field ({option_key}) '
f'for {name} exists but there is no '
'option_val.\n\nIf you are trying to '
'populate multiple option keys, the '
'option_val field must contain a list of '
'lists.\n')
for option_config in option_key:
# option_config is a list of pipe config levels down to the option
if config:
key_list = config + option_config
else:
key_list = option_config
option_val = option_config[-1]
if option_val in self.grab_tiered_dct(cfg, key_list[:-1]):
opts.append(option_val)
else: # AND, if there are multiple option-val's (in a list) in the docstring, it gets iterated below in 'for opt in option' etc. AND THAT'S WHEN YOU HAVE TO DELINEATE WITHIN THE NODE BLOCK CODE!!!
opts = [None]
all_opts += opts
for name, block_dct in self.node_blocks.items(): # <--- iterates over either the single node block in the sequence, or a list of node blocks within the list of node blocks, i.e. for option forking.
switch = self.check_null(block_dct['switch'])
config = self.check_null(block_dct['config'])
option_key = self.check_null(block_dct['option_key'])
option_val = self.check_null(block_dct['option_val'])
inputs = self.check_null(block_dct['inputs'])
outputs = self.check_null(block_dct['outputs'])
block_function = block_dct['block_function']
opts = []
if option_key and option_val:
if not isinstance(option_key, list):
option_key = [option_key]
if not isinstance(option_val, list):
option_val = [option_val]
if config:
key_list = config + option_key
else:
key_list = option_key
if 'USER-DEFINED' in option_val:
# load custom config data into each 'opt'
opts = self.grab_tiered_dct(cfg, key_list)
else:
for option in option_val:
if option in self.grab_tiered_dct(cfg, key_list): # <---- goes over the option_vals in the node block docstring, and checks if the user's pipeline config included it in the forking list
opts.append(option)
else: # AND, if there are multiple option-val's (in a list) in the docstring, it gets iterated below in 'for opt in option' etc. AND THAT'S WHEN YOU HAVE TO DELINEATE WITHIN THE NODE BLOCK CODE!!!
opts = [None] # THIS ALSO MEANS the multiple option-val's in docstring node blocks | |
import bisect
from copy import deepcopy
from Bio.Seq import Seq
from mutalyzer_crossmapper import Coding, Genomic, NonCoding
from mutalyzer_mutator.util import reverse_complement
from ..description_model import (
variant_to_description,
variants_to_description,
yield_sub_model,
)
from ..reference import (
extract_feature_model,
get_internal_selector_model,
slice_to_selector,
yield_locations,
)
from ..util import (
construct_sequence,
get_end,
get_inserted_sequence,
get_start,
set_by_path,
set_end,
set_start,
)
from .to_hgvs_coordinates import genomic_to_point, reverse_strand_shift
def to_rna_reference_model(reference_model, selector_id, transcribe=True):
"""
Get the RNA reference model of the provided selector.
1. Extract the tree corresponding to the selector from the model (including
the parents).
2. Slice the sequence.
3. Update the model feature locations using the crossmapper.
TODO: Make sure everything is on the plus strand?
:arg dict reference_model: Reference model.
:arg str selector_id: Selector ID.
:arg bool transcribe: Transcribe the sequence to RNA.
:returns: RNA reference model.
:rtype: dict
"""
rna_model = {
"annotations": deepcopy(
extract_feature_model(reference_model["annotations"], selector_id)[0]
),
"sequence": {
"seq": str(
Seq(slice_to_selector(reference_model, selector_id)).transcribe()
).lower()
if transcribe
else slice_to_selector(reference_model, selector_id)
},
}
s_m = get_internal_selector_model(rna_model["annotations"], selector_id, True)
x = NonCoding(s_m["exon"]).coordinate_to_noncoding
new_start = x(s_m["exon"][0][0])[0] - 1
new_end = x(s_m["exon"][-1][-1])[0]
for location, f_type in yield_locations(rna_model["annotations"]):
if f_type == "CDS":
set_start(location, x(get_start(location))[0] - 1)
set_end(location, x(get_end(location))[0] - 1)
elif f_type == "exon":
set_start(location, x(get_start(location))[0] - 1)
set_end(location, x(get_end(location))[0] + x(get_end(location))[1] - 1)
else:
set_start(location, new_start)
set_end(location, new_end)
return rna_model
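# Illustrative effect (rough sketch; exact offsets come from the crossmapper):
# a selector with genomic exons [(0, 100), (200, 300)] ends up with contiguous
# RNA-model exons of the same total length, roughly [(0, 100), (100, 200)], and
# the CDS/gene locations are remapped into that spliced coordinate space.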
def get_position_type(position, exons, len_ss=2, len_as=5):
"""
Get the position location within the exons/introns. Even numbers for
introns and odd numbers for exons are returned. Empty introns are
considered as well in the returned index. The second returned value
represents a splice site (1, -1) or around a splice site (-2, 2) location,
otherwise 0 (within an intron outside the splice (around) sites or
within an exon).
:arg int position: Zero-based position.
:arg list exons: Zero-based half open exon positions list of tuples.
:arg int len_ss: Splice site length.
:arg int len_as: Around splice site length.
:returns: Position type.
:rtype: tuple
"""
x = NonCoding(exons).coordinate_to_noncoding
exons = _get_flatten_exons(exons)
position_x = x(position)
if position_x[1] == 0:
return bisect.bisect_right(exons, position), 0
elif 0 < abs(position_x[1]) <= len_ss:
if position_x[1] > 0:
return bisect.bisect_right(exons, position), 1
else:
return bisect.bisect_left(exons, position), -1
elif len_ss < abs(position_x[1]) <= len_ss + len_as:
if position_x[1] > 0:
return bisect.bisect_right(exons, position), 2
else:
return bisect.bisect_left(exons, position), -2
else:
return bisect.bisect_left(exons, position), 0
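# Reading the return value (illustrative, assuming exons [(10, 20), (30, 40)],
# zero-based half-open): the first element is 1 inside the first exon, 2 in the
# intervening intron, 3 inside the second exon, and 0/4 upstream/downstream of
# the transcript. The second element is +/-1 within len_ss bases of an exon
# boundary (positive after an exon end, negative before an exon start), +/-2
# within the next len_as bases, and 0 otherwise.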
def _get_location_type(location, exons):
"""
Returns the location spanning with respect to the exons/introns. Currently
the supported types are: same exon (start and end in the same exon),
exon - exon (start and end in different exons), same intron,
and intron - intron.
:arg dict location: Location model.
:arg list exons: Flatten exon positions.
:returns: Location type within the exons/introns.
:rtype: str
"""
start_i = get_position_type(get_start(location), exons)
end_i = get_position_type(get_end(location) - 1, exons)
if get_start(location) == get_end(location):
# this is an insertion
if start_i[0] % 2 == 1:
return "same exon"
else:
if start_i[1] == 0:
return "same intron"
elif start_i[0] % 2 == 1 and end_i[0] % 2 == 1:
if start_i[0] == end_i[0]:
return "same exon"
else:
return "exon exon"
elif start_i[0] % 2 == 0 and end_i[0] % 2 == 0:
if start_i[0] == end_i[0] and start_i[1] == 0:
return "same intron"
if start_i[0] != end_i[0] and start_i[1] == 0 and end_i[1] == 0:
return "intron intron"
def _get_flatten_exons(exons):
"""
Transform the exon list of tuples into a list of integers.
:params list exons: Exons as a list of tuples.
:return: Flattened exons list.
:rtype: list
"""
return [e for exon in exons for e in exon]
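# Illustrative: _get_flatten_exons([(10, 20), (30, 40)]) -> [10, 20, 30, 40]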
def _get_exon_start_position(position, exons):
"""
Given an intronic position (start), get its appropriate exon position.
:arg int position: Zero-based position.
:arg list exons: Flattened exons list.
:returns: Exon position.
:rtype: int
"""
return exons[bisect.bisect_right(exons, position)]
def _get_exon_end_position(position, exons):
"""
Given an intronic position (end), get its appropriate exon position.
:arg int position: Zero-based position.
:arg list exons: Flattened exons list.
:returns: Exon position.
:rtype: int
"""
return exons[bisect.bisect_left(exons, position) - 1]
def _set_start_to_exon(location, exons):
"""
Update the location start position with its appropriate exon position.
:arg dict location: Zero-based location model.
:arg list exons: Flattened exons list.
"""
set_start(location, _get_exon_start_position(get_start(location), exons))
def _set_end_to_exon(location, exons):
"""
Update the location end position with its appropriate exon position.
:arg dict location: Zero-based location model.
:arg list exons: Flattened exons list.
"""
set_end(location, _get_exon_end_position(get_end(location), exons))
def _trim_to_exons(variants, exons, sequences):
"""
Update variant locations to the corresponding exons.
Notes:
- same intron locations are discarded;
- splice site checks should have been performed already.
"""
new_variants = []
for v in variants:
new_v = deepcopy(v)
if v.get("location"):
location_type = _get_location_type(v["location"], exons)
if location_type == "intron intron" and not (
v.get("inserted") and construct_sequence(v["inserted"], sequences)
):
_set_start_to_exon(new_v["location"], _get_flatten_exons(exons))
_set_end_to_exon(new_v["location"], _get_flatten_exons(exons))
new_variants.append(new_v)
elif location_type == "exon exon":
new_variants.append(new_v)
elif location_type == "same exon":
new_variants.append(new_v)
return new_variants
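# Illustratively: a plain deletion confined to a single intron is dropped; one
# that starts in one intron and ends in another is snapped to the exon
# boundaries it encloses; same-exon and exon-exon variants pass through as-is.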
def to_rna_variants(variants, sequences, selector_model):
"""
Convert coordinate delins variants to RNA.
:arg list variants: Variants with coordinate locations.
:arg list sequences: List with sequences dictionary.
:arg dict selector_model: Selector model.
:returns: Converted RNA variants.
:rtype: dict
"""
trimmed_variants = _trim_to_exons(variants, selector_model["exon"], sequences)
x = NonCoding(selector_model["exon"]).coordinate_to_noncoding
for variant in trimmed_variants:
if variant.get("location"):
set_start(variant["location"], x(get_start(variant))[0] - 1)
set_end(
variant["location"], x(get_end(variant))[0] + x(get_end(variant))[1] - 1
)
if variant.get("inserted"):
variant["inserted"] = [
{
"source": "description",
"sequence": get_inserted_sequence(variant, sequences),
}
]
return to_rna_sequences(trimmed_variants)
def to_rna_sequences(model):
"""
Convert all the sequences present in the model to RNA.
:arg dict model: Description model.
"""
for seq, path in yield_sub_model(model, ["sequence"]):
set_by_path(model, path, str(Seq(seq).transcribe().lower()))
return model
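# Illustrative: any "sequence" value such as "ATGTTT" becomes "auguuu"
# (T -> U via Biopython's transcribe(), then lower-cased).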
def _point_to_cds_coordinate(point, selector_model, crossmap):
genomic_to_coordinate = Genomic().genomic_to_coordinate
if selector_model.get("inverted"):
if point.get("shift"):
point["position"] -= point["shift"]
coding = crossmap.coordinate_to_coding(point["position"], degenerate=True)
if coding[2] == -1:
return genomic_to_point(0)
else:
return genomic_to_point(genomic_to_coordinate(coding[0]))
def _get_inserted_sequence(insertion, sequences):
if isinstance(insertion["source"], str):
source = insertion["source"]
elif isinstance(insertion["source"], dict):
source = insertion["source"]["id"]
return sequences[source][
get_start(insertion["location"]) : get_end(insertion["location"])
]
def merge_inserted_to_string(inserted, sequences):
inserted_value = ""
for insertion in inserted:
if insertion.get("sequence"):
inserted_value += insertion.get("sequence")
else:
inserted_value += _get_inserted_sequence(insertion, sequences)
if insertion.get("inverted"):
inserted_value = reverse_complement(inserted_value)
return {"source": "description", "sequence": inserted_value}
def variant_to_cds_coordinate(variant, sequences, selector_model, crossmap):
new_variant = deepcopy(variant)
location = new_variant["location"]
if location["type"] == "range":
location["start"] = _point_to_cds_coordinate(
location["start"], selector_model, crossmap
)
location["end"] = _point_to_cds_coordinate(
location["end"], selector_model, crossmap
)
else:
location = _point_to_cds_coordinate(location, selector_model, crossmap)
if new_variant.get("inserted"):
new_variant["inserted"] = [
merge_inserted_to_string(new_variant["inserted"], sequences)
]
new_variant["location"] = location
return new_variant
def reverse_start_end(variants):
for variant in variants:
if variant.get("location") and variant["location"]["type"] == "range":
location = variant["location"]
location["start"], location["end"] = location["end"], location["start"]
location["start"]["position"] -= 1
location["end"]["position"] -= 1
def _get_cds_into_exons(exons, cds):
l_index = bisect.bisect_right(exons, cds[0])
r_index = bisect.bisect_left(exons, cds[1])
return [cds[0]] + exons[l_index:r_index] + [cds[1]]
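# Illustrative: with flattened exons [0, 100, 200, 300] and cds = (50, 250),
# _get_cds_into_exons(exons, cds) -> [50, 100, 200, 250], i.e. the exon
# boundary list clipped to the CDS span.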
def _location_in_same_intron(location, exons):
start_i = bisect.bisect_right(exons, get_start(location))
end_i = bisect.bisect_left(exons, get_end(location))
if start_i == end_i and start_i % 2 == 0:
return True
else:
return False
def _splice_site_removal(location, exons):
start_i = bisect.bisect_right(exons, get_start(location))
end_i = bisect.bisect_left(exons, get_end(location))
if end_i - start_i == 1:
return True
def _get_exons_and_cds(selector_model):
exons = [e for l in selector_model["exon"] for e in l]
cds = [selector_model["cds"][0][0], selector_model["cds"][0][1]]
if selector_model.get("inverted"):
cds[0] = exons[0]
else:
cds[1] = exons[-1]
return exons, cds
def _get_exons_and_cds_2(s_m):
exons = [e for l in s_m["exon"] for e in l]
cds = [s_m["cds"][0][0], s_m["cds"][0][1]]
return exons, cds
def to_exon_positions(variants, exons, cds):
exons = _get_cds_into_exons(exons, cds)
new_variants = []
for variant in variants:
if (
variant.get("type") == "deletion_insertion"
and variant.get("location")
and not _location_in_same_intron(variant["location"], exons)
and not (get_start(variant) <= exons[0] and get_end(variant) <= exons[0])
):
n_v = deepcopy(variant)
exon_s = bisect.bisect(exons, get_start(n_v))
if exon_s % 2 == 0 and exon_s < len(exons):
n_v["location"]["start"]["position"] = exons[exon_s]
exon_e = bisect.bisect(exons, get_end(n_v))
if exon_e % 2 == 0 and exon_e < len(exons):
n_v["location"]["end"]["position"] = exons[exon_e]
new_variants.append(n_v)
return new_variants
def _get_splice_site_hits(variants, exons, cds):
hits = []
for i, variant in enumerate(variants):
if (
variant.get("type") == "deletion_insertion"
and variant.get("location")
and _splice_site_removal(
variant["location"], _get_cds_into_exons(exons, cds)
)
):
hits.append(i)
return hits
def reverse_variants(variants, sequences):
reversed_variants = deepcopy(variants)
reverse_strand_shift(reversed_variants, sequences["reference"])
reverse_start_end(reversed_variants)
return reversed_variants
def to_rna_protein_coordinates(variants, sequences, selector_model):
"""
Converts the locations to cds equivalent.
:param variants: Variants with locations in the coordinate system.
:param sequences: Sequences with their ids as keys.
:param selector_model: Selector model according to which
the conversion is performed.
"""
exons, cds = _get_exons_and_cds(selector_model)
crossmap = Coding(selector_model["exon"], cds, selector_model["inverted"])
if selector_model.get("inverted"):
variants = reverse_variants(variants, sequences)
| |
# -*- coding: utf-8 -*-
# Licensed under The MIT License
#
# Parses the layout DXF file format.
import dxfgrabber
import sys
class _Line(object):
def __init__(self, start, end):
self._start = start
self._end = end
def _get_other_end(self, c):
if c == self._start:
return self._end
if c == self._end:
return self._start
raise Exception('Unknown end of line')
class Strand(object):
def __init__(self, id):
self._id = id
self._coords = []
def get_id(self):
return self._id
def get_coords(self):
return list(self._coords)
class _StrandBuilder(object):
def __init__(self, min_x, max_x, min_y, max_y):
self._min_x = min_x
self._max_x = max_x
self._range_x = float(max_x - min_x)
self._min_y = min_y
self._max_y = max_y
self._range_y = float(max_y - min_y)
self._enable_horizontal_mirror = False
self._coords = []
def enable_horizontal_mirror(self):
self._enable_horizontal_mirror = True
# Note that x/y here are [0, 1] that will be mapped to [min, max] range.
def add_custom_coords(self, count, start_x, end_x, start_y, end_y, is_abs):
if self._enable_horizontal_mirror:
start_x = 1.0 - start_x
end_x = 1.0 - end_x
start_x = float(start_x)
end_x = float(end_x)
start_y = float(start_y)
end_y = float(end_y)
step_x = (end_x - start_x) / (count - 1)
step_y = (end_y - start_y) / (count - 1)
for i in xrange(count):
if is_abs:
x = (start_x + step_x * i) + self._min_x
y = (start_y + step_y * i) + self._min_y
else:
x = (start_x + step_x * i) * self._range_x + self._min_x
y = (start_y + step_y * i) * self._range_y + self._min_y
self.add_one_abs(x, y)
def add_horizontal_rel(self, count, start_x, end_x, y):
self.add_custom_coords(count, start_x, end_x, y, y, False)
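# Illustrative (assuming min_x=0, max_x=100, min_y=0, max_y=50, mirror off):
# add_horizontal_rel(3, 0.0, 1.0, 0.5) appends (0, 25), (50, 25), (100, 25).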
def add_vertical_abs(self, count, x, start_y, end_y):
self.add_custom_coords(count, x, x, start_y, end_y, True)
def add_one_abs(self, x, y):
if x < self._min_x or x > self._max_x:
raise Exception('x out of range: %s (%s / %s)' % (
x, self._min_x, self._max_x))
if y < self._min_y or y > self._max_y:
raise Exception('y out of range: %s (%s / %s)' % (
y, self._min_y, self._max_y))
self._coords.append((int(round(x)), int(round(y))))
def get_coords(self):
return list(self._coords)
def clear(self):
self._coords = []
class TclLayout(object):
def __init__(self, file_path, max_x, max_y):
self._strands = {}
self._file_path = file_path
self._max_x = max_x
self._max_y = max_y
dxf = dxfgrabber.readfile(file_path)
self._parse(dxf)
self._normalize()
self._customize()
self._print_info()
def get_strands(self):
return list(self._strands.values())
def get_strand_coords(self, id):
if id >= len(self._strands):
return None
return self._strands[id].get_coords()
def get_all_coords(self):
result = []
for s in self._strands.values():
result += s._coords
return result
def _print_info(self):
print 'Layout "%s", dst_width=%s, dst_height=%s' % (
self._file_path, self._max_x + 1, self._max_y + 1)
for s in self._strands.values():
min_x, max_x, min_y, max_y = self._find_min_max(s._coords)
print ' Strand P%s, led_count=%s, x=(%s-%s), y=(%s-%s)' % (
s._id + 1, len(s._coords), min_x, max_x, min_y, max_y)
#for coord in s._coords:
# print ' %s %s' % coord
def _parse(self, dxf):
anchors = {}
circles = set()
dots = {}
for e in dxf.entities:
type = e.dxftype
if e.dxftype == 'TEXT':
if (len(e.text) != 2 or e.text[0] != 'p' or
e.text[1] < '1' or e.text[1] > '8'):
raise Exception('Unsupported anchor text "%s"' % e.text)
id = int(e.text[1]) - 1
if id in anchors:
raise Exception('More than one id of %s' % id)
anchors[id] = self._get_coord(e.insert)
elif e.dxftype == 'CIRCLE':
center = self._get_coord(e.center)
if center in circles:
raise Exception('More than one circle at %s' % [center])
circles.add(center)
elif e.dxftype == 'LINE':
start = self._get_coord(e.start)
end = self._get_coord(e.end)
if start == end:
raise Exception('Line of zero length: %s' % [start])
line = _Line(start, end)
self._add_dot(dots, start, line)
self._add_dot(dots, end, line)
else:
raise Exception('Unsupported DXF entity type %s' % e.dxftype)
for id, id_coord in anchors.items():
strand = Strand(id)
self._strands[id] = strand
prev_coord = id_coord
while True:
if prev_coord not in dots:
raise Exception('No lines found to originate from %s' % [prev_coord])
if len(dots[prev_coord]) == 0:
raise Exception('All lines were consumed for %s' % [prev_coord])
if len(dots[prev_coord]) > 1:
raise Exception('More than one line starts at %s' % [prev_coord])
in_line = dots[prev_coord][0]
coord = in_line._get_other_end(prev_coord)
dots[prev_coord] = []
if coord not in circles:
raise Exception('No circle found for %s' % [coord])
circles.remove(coord)
strand._coords.append(coord)
dots[coord].remove(in_line)
if len(dots[coord]) == 0:
break
prev_coord = coord
if len(circles):
raise Exception('Some circles remain unconsumed: %s' % circles)
for coord, lines in dots.items():
if len(lines) != 0:
raise Exception('Some dots remain unconsumed: %s' % [coord])
def _get_coord(self, c):
if len(c) == 3 and c[2] != 0:
raise Exception('Non-zero Z coordinate in %s' % [c])
return (float(c[0]), float(c[1]))
def _add_dot(self, dots, coord, line):
if coord not in dots:
dots[coord] = []
dots[coord].append(line)
if len(dots[coord]) > 2:
raise Exception('More than two lines connected at dot %s' % [coord])
def _normalize(self):
# Make all coordinates be in range of [0-max_xy].
min_x, max_x, min_y, max_y = self._find_min_max(self._get_all_coords())
width = max_x - min_x
height = max_y - min_y
for s in self._strands.values():
new_coords = []
for c in s._coords:
x = (c[0] - min_x) / width * self._max_x
y = (c[1] - min_y) / height * self._max_y
y = self._max_y - y
new_coords.append((int(round(x)), int(round(y))))
s._coords = new_coords
def _get_all_coords(self):
result = []
for s in self._strands.values():
result += s._coords
return result
def _find_min_max(self, coords):
min_x = sys.float_info.max
min_y = sys.float_info.max
max_x = sys.float_info.min
max_y = sys.float_info.min
for c in coords:
x, y = c[0], c[1]
if x < min_x:
min_x = x
if x > max_x:
max_x = x
if y < min_y:
min_y = y
if y > max_y:
max_y = y
return (min_x, max_x, min_y, max_y)
# The place of terrible hacks, because I have no time to mod the DXF.
def _customize(self):
if self._file_path == 'dfplayer/layout1.dxf':
self._make_new_flipper(2, 0, 4, False)
self._make_new_flipper(1, 3, 5, True)
self._make_new_tail(2, 6, False)
self._make_new_tail(3, 7, True)
return
if self._file_path == 'dfplayer/layout3.dxf':
#self._make_dorsal_fin()
self._make_dorsal_fin_reverse()
return
def _make_new_tail(self, old_strand_id, new_strand_id, is_driver):
if new_strand_id in self._strands:
raise Exception('Strand already exists')
old_cutoff_coord_id = 202
old_coords = self._strands[old_strand_id]._coords
min_x, max_x, min_y, max_y = self._find_min_max(
old_coords[old_cutoff_coord_id:])
del old_coords[old_cutoff_coord_id:]
builder = _StrandBuilder(min_x, max_x, min_y, max_y)
if not is_driver:
builder.enable_horizontal_mirror()
y_step = 1.0 / 7.0
width = 20.0 # In feet.
builder.add_horizontal_rel(62, 0, 20.0 / width, 0)
builder.add_horizontal_rel(51, 0, 20.0 / width, y_step)
builder.add_horizontal_rel(41, 0, 16.0 / width, y_step * 2)
builder.add_horizontal_rel(39, 0, 14.0 / width, y_step * 3)
builder.add_horizontal_rel(36, 0, 13.0 / width, y_step * 4)
builder.add_horizontal_rel(38, 0, 14.0 / width, y_step * 5)
builder.add_horizontal_rel(42, 0, 16.0 / width, y_step * 6)
builder.add_horizontal_rel(51, 0, 20.0 / width, y_step * 7)
self._add_strand_builder(new_strand_id, builder)
def _make_new_flipper(self, strand1, strand2, new_strand_id, is_driver):
if new_strand_id in self._strands:
raise Exception('Strand already exists')
min_x1, max_x1, min_y1, max_y1 = self._find_min_max(
self._strands[strand1]._coords)
min_x2, max_x2, min_y2, max_y2 = self._find_min_max(
self._strands[strand2]._coords)
min_x, max_x = max_x1 + 2, min_x2 - 2
min_y, max_y = min(min_y1, min_y2), max(max_y1, max_y2)
builder = _StrandBuilder(min_x, max_x, min_y, max_y)
y_step = 1.0 / 9.0
width = 5.0 # In feet.
if is_driver:
builder.add_horizontal_rel(32, 0, 5.0 / width, 0)
builder.add_horizontal_rel(25, 0, 5.0 / width, y_step)
builder.add_horizontal_rel(25, 0, 5.0 / width, y_step * 2)
builder.add_horizontal_rel(17, 0, 3.0 / width, y_step * 3)
builder.add_horizontal_rel(18, 0, 3.0 / width, y_step * 4)
builder.add_horizontal_rel(18, 0, 3.0 / width, y_step * 5)
builder.add_horizontal_rel(18, 0, 3.0 / width, y_step * 6)
builder.add_horizontal_rel(18, 0, 3.0 / width, y_step * 7)
builder.add_horizontal_rel(15, 0, 3.0 / width, y_step * 8)
builder.add_horizontal_rel(14, 0, 3.0 / width, y_step * 9)
else:
builder.enable_horizontal_mirror()
builder.add_horizontal_rel(32, 0, 5.0 / width, 0)
builder.add_horizontal_rel(26, 0, 5.0 / width, y_step)
builder.add_horizontal_rel(26, 0, 5.0 / width, y_step * 2)
builder.add_horizontal_rel(18, 0, 3.0 / width, y_step * 3)
builder.add_horizontal_rel(18, 0, 3.0 / width, y_step * 4)
builder.add_horizontal_rel(18, 0, 3.0 / width, y_step * 5)
builder.add_horizontal_rel(17, 0, 3.0 / width, y_step * 6)
builder.add_horizontal_rel(17, 0, 3.0 / width, y_step * 7)
builder.add_horizontal_rel(15, 0, 3.0 / width, y_step * 8)
builder.add_horizontal_rel(15, 0, 3.0 / width, y_step * 9)
self._add_strand_builder(new_strand_id, builder)
def _add_strand_builder(self, strand_id, builder):
self._strands[strand_id] = Strand(strand_id)
self._strands[strand_id]._coords = builder.get_coords()
builder.clear()
def _make_dorsal_fin(self):
b = _StrandBuilder(0, 64,
<reponame>SongweiGe/DoodlerGAN
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import cv2
import torch
import numpy as np
import argparse
import torchvision
from PIL import Image
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
from retry.api import retry_call
from torch.utils import data
from torchvision import transforms
from part_selector import Trainer as Trainer_selector
from part_generator import Trainer as Trainer_cond_unet
from scipy.ndimage.morphology import distance_transform_edt
COLORS = {'initial':1-torch.cuda.FloatTensor([45, 169, 145]).view(1, -1, 1, 1)/255., 'eye':1-torch.cuda.FloatTensor([243, 156, 18]).view(1, -1, 1, 1)/255., 'none':1-torch.cuda.FloatTensor([149, 165, 166]).view(1, -1, 1, 1)/255.,
'arms':1-torch.cuda.FloatTensor([211, 84, 0]).view(1, -1, 1, 1)/255., 'beak':1-torch.cuda.FloatTensor([41, 128, 185]).view(1, -1, 1, 1)/255., 'mouth':1-torch.cuda.FloatTensor([54, 153, 219]).view(1, -1, 1, 1)/255.,
'body':1-torch.cuda.FloatTensor([192, 57, 43]).view(1, -1, 1, 1)/255., 'ears':1-torch.cuda.FloatTensor([142, 68, 173]).view(1, -1, 1, 1)/255., 'feet':1-torch.cuda.FloatTensor([39, 174, 96]).view(1, -1, 1, 1)/255.,
'fin':1-torch.cuda.FloatTensor([69, 85, 101]).view(1, -1, 1, 1)/255., 'hair':1-torch.cuda.FloatTensor([127, 140, 141]).view(1, -1, 1, 1)/255., 'hands':1-torch.cuda.FloatTensor([45, 63, 81]).view(1, -1, 1, 1)/255.,
'head':1-torch.cuda.FloatTensor([241, 197, 17]).view(1, -1, 1, 1)/255., 'horns':1-torch.cuda.FloatTensor([51, 205, 117]).view(1, -1, 1, 1)/255., 'legs':1-torch.cuda.FloatTensor([232, 135, 50]).view(1, -1, 1, 1)/255.,
'nose':1-torch.cuda.FloatTensor([233, 90, 75]).view(1, -1, 1, 1)/255., 'paws':1-torch.cuda.FloatTensor([160, 98, 186]).view(1, -1, 1, 1)/255., 'tail':1-torch.cuda.FloatTensor([58, 78, 99]).view(1, -1, 1, 1)/255.,
'wings':1-torch.cuda.FloatTensor([198, 203, 207]).view(1, -1, 1, 1)/255., 'details':1-torch.cuda.FloatTensor([171, 190, 191]).view(1, -1, 1, 1)/255.}
class Initialstroke_Dataset(data.Dataset):
def __init__(self, folder, image_size):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for p in Path(f'{folder}').glob(f'**/*.png')]
self.transform = transforms.Compose([
transforms.ToTensor(),
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = self.transform(Image.open(path))
return img
def sample(self, n):
sample_ids = [np.random.randint(self.__len__()) for _ in range(n)]
samples = [self.transform(Image.open(self.paths[sample_id])) for sample_id in sample_ids]
return torch.stack(samples).cuda()
def load_latest(model_dir, name):
model_dir = Path(model_dir)
file_paths = [p for p in Path(model_dir / name).glob('model_*.pt')]
saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths))
if len(saved_nums) == 0:
return
num = saved_nums[-1]
print(f'continuing -{name} from previous epoch - {num}')
return num
def noise(n, latent_dim):
return torch.randn(n, latent_dim).cuda()
def noise_list(n, layers, latent_dim):
return [(noise(n, latent_dim), layers)]
def mixed_list(n, layers, latent_dim):
tt = int(torch.rand(()).numpy() * layers)
return noise_list(n, tt, latent_dim) + noise_list(n, layers - tt, latent_dim)
def image_noise(n, im_size):
return torch.FloatTensor(n, im_size, im_size, 1).uniform_(0., 1.).cuda()
def evaluate_in_chunks(max_batch_size, model, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
chunked_outputs = [model(*i) for i in split_args]
if len(chunked_outputs) == 1:
return chunked_outputs[0]
return torch.cat(chunked_outputs, dim=0)
def evaluate_in_chunks_unet(max_batch_size, model, map_feats, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
split_map_feats = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), map_feats))))
chunked_outputs = [model(*i, j) for i, j in zip(split_args, split_map_feats)]
if len(chunked_outputs) == 1:
return chunked_outputs[0]
return torch.cat(chunked_outputs, dim=0)
def styles_def_to_tensor(styles_def):
return torch.cat([t[:, None, :].expand(-1, n, -1) for t, n in styles_def], dim=1)
def gs_to_rgb(image, color):
image_rgb = image.repeat(1, 3, 1, 1)
return 1-image_rgb*color
@torch.no_grad()
def generate_truncated(S, G, style, noi, trunc_psi = 0.75, num_image_tiles = 8, bitmap_feats=None, batch_size=8):
latent_dim = G.latent_dim
z = noise(2000, latent_dim)
samples = evaluate_in_chunks(batch_size, S, z).cpu().numpy()
av = np.mean(samples, axis = 0)
av = np.expand_dims(av, axis = 0)
w_space = []
for tensor, num_layers in style:
tmp = S(tensor)
av_torch = torch.from_numpy(av).cuda()
# import ipdb;ipdb.set_trace()
tmp = trunc_psi * (tmp - av_torch) + av_torch
w_space.append((tmp, num_layers))
w_styles = styles_def_to_tensor(w_space)
generated_images = evaluate_in_chunks_unet(batch_size, G, bitmap_feats, w_styles, noi)
return generated_images.clamp_(0., 1.)
@torch.no_grad()
def generate_part(model, partial_image, partial_rgb, color=None, part_name=20, num=0, num_image_tiles=8, trunc_psi=1., save_img=False, trans_std=2, results_dir='../results/bird_seq_unet_5fold'):
model.eval()
ext = 'png'
num_rows = num_image_tiles
latent_dim = model.G.latent_dim
image_size = model.G.image_size
num_layers = model.G.num_layers
def translate_image(image, trans_std=2, rot_std=3, scale_std=2):
affine_image = torch.zeros_like(image)
side = image.shape[-1]
x_shift = np.random.normal(0, trans_std)
y_shift = np.random.normal(0, trans_std)
theta = np.random.normal(0, rot_std)
scale = int(np.random.normal(0, scale_std))
T = np.float32([[1, 0, x_shift], [0, 1, y_shift]])
M = cv2.getRotationMatrix2D((side/2,side/2),theta,1)
# NOTE: only the translation T is applied below; the rotation matrix M is currently unused.
for i in range(image.shape[1]):
sketch_channel = image[0, i].cpu().data.numpy()
sketch_translation = cv2.warpAffine(sketch_channel, T, (side, side))
affine_image[0, i] = torch.cuda.FloatTensor(sketch_translation)
return affine_image, x_shift, y_shift, theta, scale
def recover_image(image, x_shift, y_shift, theta, scale):
x_shift *= -1
y_shift *= -1
theta *= -1
# scale *= -1
affine_image = torch.zeros_like(image)
side = image.shape[-1]
T = np.float32([[1, 0, x_shift], [0, 1, y_shift]])
M = cv2.getRotationMatrix2D((side/2,side/2),theta,1)
for i in range(image.shape[1]):
sketch_channel = image[0, i].cpu().data.numpy()
sketch_translation = cv2.warpAffine(sketch_channel, T, (side, side))
affine_image[0, i] = torch.cuda.FloatTensor(sketch_translation)
return affine_image
# latents and noise
latents_z = noise_list(num_rows ** 2, num_layers, latent_dim)
n = image_noise(num_rows ** 2, image_size)
image_partial_batch = partial_image[:, -1:, :, :]
translated_image, dx, dy, theta, scale = translate_image(partial_image, trans_std=trans_std)
bitmap_feats = model.Enc(translated_image)
# bitmap_feats = model.Enc(partial_image)
# generated_partial_images = generate_truncated(model.S, model.G, latents_z, n, trunc_psi = trunc_psi, bitmap_feats=bitmap_feats)
generated_partial_images = recover_image(generate_truncated(model.S, model.G, latents_z, n, trunc_psi = trunc_psi, bitmap_feats=bitmap_feats), dx, dy, theta, scale)
# post process
generated_partial_rgb = gs_to_rgb(generated_partial_images, color)
generated_images = generated_partial_images + image_partial_batch
generated_rgb = 1 - ((1-generated_partial_rgb)+(1-partial_rgb))
if save_img:
torchvision.utils.save_image(generated_partial_rgb, os.path.join(results_dir, f'{str(num)}-{part_name}-comp.{ext}'), nrow=num_rows)
torchvision.utils.save_image(generated_rgb, os.path.join(results_dir, f'{str(num)}-{part_name}.{ext}'), nrow=num_rows)
return generated_partial_images.clamp_(0., 1.), generated_images.clamp_(0., 1.), generated_partial_rgb.clamp_(0., 1.), generated_rgb.clamp_(0., 1.)
def train_from_folder(
data_path = '../../data',
results_dir = '../../results',
models_dir = '../../models',
n_part = 1,
image_size = 128,
network_capacity = 16,
batch_size = 3,
num_image_tiles = 8,
trunc_psi = 0.75,
generate_all=False,
):
min_step = 599
name_eye='long_generic_creative_sequential_r6_partstack_aug_eye_unet_largeaug'
load_from = load_latest(models_dir, name_eye)
load_from = min(min_step, load_from)
model_eye = Trainer_cond_unet(name_eye, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_eye.load_config()
model_eye.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_eye, load_from)))
name_head='long_generic_creative_sequential_r6_partstack_aug_head_unet_largeaug'
load_from = load_latest(models_dir, name_head)
load_from = min(min_step, load_from)
model_head = Trainer_cond_unet(name_head, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_head.load_config()
model_head.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_head, load_from)))
name_body='long_generic_creative_sequential_r6_partstack_aug_body_unet_largeaug'
load_from = load_latest(models_dir, name_body)
load_from = min(min_step, load_from)
model_body = Trainer_cond_unet(name_body, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_body.load_config()
model_body.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_body, load_from)))
name_beak='long_generic_creative_sequential_r6_partstack_aug_beak_unet_largeaug'
load_from = load_latest(models_dir, name_beak)
load_from = min(min_step, load_from)
model_beak = Trainer_cond_unet(name_beak, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_beak.load_config()
model_beak.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_beak, load_from)))
name_ears='long_generic_creative_sequential_r6_partstack_aug_ears_unet_largeaug'
load_from = load_latest(models_dir, name_ears)
load_from = min(min_step, load_from)
model_ears = Trainer_cond_unet(name_ears, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_ears.load_config()
model_ears.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_ears, load_from)))
name_hands='long_generic_creative_sequential_r6_partstack_aug_hands_unet_largeaug'
load_from = load_latest(models_dir, name_hands)
load_from = min(min_step, load_from)
model_hands = Trainer_cond_unet(name_hands, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_hands.load_config()
model_hands.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_hands, load_from)))
name_legs='long_generic_creative_sequential_r6_partstack_aug_legs_unet_largeaug'
load_from = load_latest(models_dir, name_legs)
load_from = min(min_step, load_from)
model_legs = Trainer_cond_unet(name_legs, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_legs.load_config()
model_legs.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_legs, load_from)))
name_feet='long_generic_creative_sequential_r6_partstack_aug_feet_unet_largeaug'
load_from = load_latest(models_dir, name_feet)
load_from = min(min_step, load_from)
model_feet = Trainer_cond_unet(name_feet, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_feet.load_config()
model_feet.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_feet, load_from)))
name_wings='long_generic_creative_sequential_r6_partstack_aug_wings_unet_largeaug'
load_from = load_latest(models_dir, name_wings)
load_from = min(min_step, load_from)
model_wings = Trainer_cond_unet(name_wings, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_wings.load_config()
model_wings.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_wings, load_from)))
name_mouth='long_generic_creative_sequential_r6_partstack_aug_mouth_unet_largeaug'
load_from = load_latest(models_dir, name_mouth)
load_from = min(min_step, load_from)
model_mouth = Trainer_cond_unet(name_mouth, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_mouth.load_config()
model_mouth.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_mouth, load_from)))
name_nose='long_generic_creative_sequential_r6_partstack_aug_nose_unet_largeaug'
load_from = load_latest(models_dir, name_nose)
load_from = min(min_step, load_from)
model_nose = Trainer_cond_unet(name_nose, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_nose.load_config()
model_nose.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_nose, load_from)))
name_hair='long_generic_creative_sequential_r6_partstack_aug_hair_unet_largeaug'
load_from = load_latest(models_dir, name_hair)
load_from = min(min_step, load_from)
model_hair = Trainer_cond_unet(name_hair, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_hair.load_config()
model_hair.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_hair, load_from)))
name_tail='long_generic_creative_sequential_r6_partstack_aug_tail_unet_largeaug'
load_from = load_latest(models_dir, name_tail)
load_from = min(min_step, load_from)
model_tail = Trainer_cond_unet(name_tail, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_tail.load_config()
model_tail.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_tail, load_from)))
name_fin='long_generic_creative_sequential_r6_partstack_aug_fin_unet_largeaug'
load_from = load_latest(models_dir, name_fin)
load_from = min(min_step, load_from)
model_fin = Trainer_cond_unet(name_fin, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_fin.load_config()
model_fin.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_fin, load_from)))
name_horns='long_generic_creative_sequential_r6_partstack_aug_horns_unet_largeaug'
load_from = load_latest(models_dir, name_horns)
load_from = min(min_step, load_from)
model_horns = Trainer_cond_unet(name_horns, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_horns.load_config()
model_horns.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_horns, load_from)))
name_paws='long_generic_creative_sequential_r6_partstack_aug_paws_unet_largeaug'
load_from = load_latest(models_dir, name_paws)
load_from = min(min_step, load_from)
model_paws = Trainer_cond_unet(name_paws, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_paws.load_config()
model_paws.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_paws, load_from)))
name_arms='long_generic_creative_sequential_r6_partstack_aug_arms_unet_largeaug'
load_from = load_latest(models_dir, name_arms)
load_from = min(min_step, load_from)
model_arms = Trainer_cond_unet(name_arms, results_dir, models_dir, n_part=n_part, batch_size=batch_size, image_size=image_size, network_capacity=network_capacity)
model_arms.load_config()
model_arms.GAN.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_arms, load_from)))
name_selector='long_generic_creative_selector_aug'
load_from = load_latest(models_dir, name_selector)
part_selector = Trainer_selector(name_selector, results_dir, models_dir, n_part = n_part, batch_size = batch_size, image_size = image_size, network_capacity=network_capacity)
part_selector.load_config()
part_selector.clf.load_state_dict(torch.load('%s/%s/model_%d.pt'%(models_dir, name_selector, load_from)))
inital_dir = '%s/generic_long_test_init_strokes_%d'%(data_path, image_size)
if not os.path.exists(results_dir):
os.mkdir(results_dir)
dataset = Initialstroke_Dataset(inital_dir, image_size=image_size)
dataloader = data.DataLoader(dataset, num_workers=5, batch_size=batch_size, drop_last=False, shuffle=False, pin_memory=True)
models = [model_eye, model_arms, model_beak, model_mouth, model_body, model_ears, model_feet, model_fin, model_hair,
model_hands, model_head, model_horns, model_legs, model_nose, model_paws, model_tail, model_wings]
target_parts = ['eye', 'arms', 'beak', 'mouth', 'body', 'ears', 'feet', 'fin',
'hair', 'hands', 'head', 'horns', 'legs', 'nose', 'paws', 'tail', 'wings', 'none']
part_to_id = {'initial': 0, 'eye': 1, 'arms': 2, 'beak': 3, 'mouth': 4, 'body': 5, 'ears': 6, 'feet': 7,
<reponame>TolaAbiodun/pyavo
# -*- coding: utf-8 -*-
"""
Class for modelling fluid properties for gas, oil and brine saturated sands.
References: Wang(2001), Batzle and Wang(1992), Geophysics.
"""
import warnings
import math
from typing import Union
warnings.filterwarnings("ignore")
def k_rho_matrix(v_cly: float, k_cly: float, k_qtz: float, rho_cly: float, rho_qtz: float) -> tuple:
"""
Calculate the Bulk modulus and Density of rock matrix.
:param v_cly: Volume of clay assumed to be 70% of Shale volume
:param k_cly: Bulk modulus of clay (Gpa)
:param k_qtz: Bulk modulus of quartz (Gpa)
:param rho_cly: Density of clay (g/cc)
:param rho_qtz: Density of quartz (g/cc)
:returns:
k_mat : Bulk modulus of rock matrix,
rho_mat : Density of rock matrix
"""
v_qtz = 1 - v_cly
k_voigt = v_cly * k_cly + v_qtz * k_qtz
k_reuss = 1 / (v_cly / k_cly + v_qtz / k_qtz)
k_mat = 0.5 * (k_voigt + k_reuss)
rho_mat = v_cly * rho_cly + v_qtz * rho_qtz
# print("The Bulk modulus(Gpa) and Density(g/cc) of the Matrix:")
return round(k_mat, 2), round(rho_mat, 2)
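# Illustrative sketch, not part of the original module: a hypothetical shaly
# sand with 20% clay, using commonly quoted moduli and densities for clay
# (~20.9 GPa, 2.58 g/cc) and quartz (~36.6 GPa, 2.65 g/cc). All numbers here
# are assumptions for demonstration only.
def _example_k_rho_matrix() -> tuple:
    # Voigt-Reuss-Hill averaged modulus and arithmetic-average density.
    k_mat, rho_mat = k_rho_matrix(v_cly=0.2, k_cly=20.9, k_qtz=36.6,
                                  rho_cly=2.58, rho_qtz=2.65)
    return k_mat, rho_mat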
def vel_sat(k_sat: float, rho_sat: float, mu: float) -> tuple:
"""
Estimate the seismic velocities after Gassmann fluid substitution using density and
elastic moduli of saturated rock.
:returns:
vp_new : P-wave velocity,
vs_new : S-wave velocity
"""
f = 3280.84  # km/s to ft/s conversion (moduli in GPa and density in g/cc give velocity in km/s)
vp_new = math.sqrt((k_sat + (mu * 4 / 3)) / rho_sat) * f
vs_new = math.sqrt(mu / rho_sat) * f
return round(vp_new, 2), round(vs_new, 2)
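# Illustrative sketch with assumed inputs: vel_sat() expects moduli in GPa and
# density in g/cc and returns velocities in ft/s (via the 3280.84 km-to-ft
# factor above). The values below are hypothetical.
def _example_vel_sat() -> tuple:
    k_sat = 12.0    # saturated bulk modulus, GPa (assumed)
    mu = 9.0        # shear modulus, GPa (assumed; unchanged by fluid substitution)
    rho_sat = 2.23  # saturated bulk density, g/cc (assumed)
    return vel_sat(k_sat, rho_sat, mu)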
class GassmannSub(object):
"""
Class to model Gassmann fluid substitution for brine sands, oil sands, and gas sands.
It generates the P and S wave velocities and density after fluid substitution according to input parameters.
Arguments:
vp = P-wave velocity from log (ft/s)
vs = S-wave velocity from log (ft/s)
rho = Bulk density form log (g/cc)
rho_o = Oil gravity (deg API)
rho_g = Gas specific gravity (relative to air)
vsh = Shale volume from log
phi = Porosity
swi = Initial water saturation from log
swt = Target water saturation
S = Salinity (ppm)
T = Temperature (deg C)
P = Pressure (psi)
init_fluid = Fluid type of initial hydrocarbon (gas or oil)
final_fluid = Fluid type of desired output where (gas or oil)
GOR = Gas-Oil ratio
"""
def __init__(self, vp: Union[int, float], vs: Union[int, float], rho: Union[int, float], rho_o: Union[int, float],
rho_g: Union[int, float], vsh: float, phi: float, swi: float, swt: float, S: float,
T: Union[int, float], P: Union[int, float], init_fluid: str, final_fluid: str, GOR: float):
try:
self.vp = vp
self.vs = vs
self.rho = rho
self.rho_o = rho_o
self.rho_g = rho_g
self.vsh = vsh
self.phi = phi
self.swi = swi
self.swt = swt
self.S = S
self.T = T
self.P = P
self.init_fluid = init_fluid
self.final_fluid = final_fluid
self.GOR = GOR
except ValueError as err:
print(f'Input right format {err}')
except TypeError as err:
print(f'Input right format {err}')
def k_rho_brine(self) -> tuple:
"""
Computes the bulk modulus(Gpa) and density(g/cc) of brine.
:return: Bulk Modulus, Density
"""
# Coefficients for water velocity computation(Batzle and Wang, 1992)
w11 = 1402.85
w21 = 4.871
w31 = -0.04783
w41 = 1.487e-4
w51 = -2.197e-7
w12 = 1.524
w22 = -0.0111
w32 = 2.747e-4
w42 = -6.503e-7
w52 = 7.987e-10
w13 = 3.437e-3
w23 = 1.739e-4
w33 = -2.135e-6
w43 = -1.455e-8
w53 = 5.23e-11
w14 = -1.197e-5
w24 = -1.628e-6
w34 = 1.237e-8
w44 = 1.327e-10
w54 = -4.614e-13
# Create a dictionary for the constants
constants_dict = {
'w11': w11,
'w21': w21,
'w31': w31,
'w41': w41,
'w51': w51,
'w12': w12,
'w22': w22,
'w32': w32,
'w42': w42,
'w52': w52,
'w13': w13,
'w23': w23,
'w33': w33,
'w43': w43,
'w53': w53,
'w14': w14,
'w24': w24,
'w34': w34,
'w44': w44,
'w54': w54
}
# Convert Pressure(psi) to Mpa
P = self.P * 6.894757 * 0.001
# Express salinity(ppm) as weight fraction
S = self.S * 1e-6
vw = 0
for i in range(1, 6):
for j in range(1, 5):
constant_key = 'w' + str(i) + str(j)
constant = constants_dict[constant_key]
# print(constant)
vw += (constant) * (self.T ** (i - 1)) * (P ** (j - 1))
v1 = 1170 - 9.6 * self.T + 0.055 * self.T * self.T - 8.5 * 10 ** (
-5) * self.T * self.T * self.T + 2.6 * P - (0.0029 * self.T * P) - (0.0476 * P**2)
v_brine = vw + S * v1 + S ** 1.5 * (780 - 10 * P + 0.16 * P * P) - 1820 * S * S
r1 = 489 * P - 2 * self.T * P + 0.016 * self.T * self.T * P - 1.3 * 10 ** (
-5) * self.T * self.T * self.T * P - 0.333 * P * P - 0.002 * self.T * P * P
rho_water = 1 + 10 ** (-6) * (-80 * self.T - 3.3 * self.T * self.T + 0.00175 * self.T * self.T * self.T + r1)
r2 = 300 * P - 2400 * P * S + self.T * (80 + 3 * self.T - 3300 * S - 13 * P + 47 * P * S)
rho_brine = rho_water + 0.668 * S + 0.44 * S * S + 10 ** (-6) * S * r2
k_brine = rho_brine * v_brine**2 * 1e-6
return k_brine, round(rho_brine, 3)
# Function to estimate initial hydrocarbon properties
def init_hyc(self) -> tuple:
"""
Computes Bulk modulus and density of initial hydrocarbon.
:return: k_hyc: Bulk modulus,
rho_hyc: Density
"""
rho_hyc = 0
k_hyc = 0
if self.init_fluid == 'oil': # Default is oil in this case
P = self.P * 6.894757 * 0.001 # convert Pressure from Psi to Mpa
rho_o = 141.5 / (self.rho_o + 131.5)
div_mill = 1 / 1000000
Bo = 0.972 + 0.00038 * (2.495 * self.GOR * math.sqrt(self.rho_g / rho_o) + self.T + 17.8) ** 1.175
rho_p = rho_o / ((1 + 0.001 * self.GOR) * Bo)
rho_s = (rho_o + 0.0012 * self.GOR * self.rho_g) / Bo
r = rho_s + (0.00277 * P - 1.71 * 0.0000001 * P * P * P) * (rho_s - 1.15) ** 2 + (3.49 * 0.0001 * P)
rho_hyc += r / (0.972 + 3.81 * 0.0001 * (self.T + 17.78) ** 1.175)
y = math.sqrt(18.33 / rho_p - 16.97)
vel = 2096 * math.sqrt(rho_p / (2.6 - rho_p)) - 3.7 * self.T + 4.64 * P + 0.0115 * (y - 1) * self.T * P
k_hyc += rho_hyc * vel * vel * div_mill
# print('Bulk modulus(Gpa) and Density(g/cc) of initial fluid (oil)')
elif self.init_fluid == 'gas':
# Only gas is present in the reservoir. A gas sand case
k_hyc = 0
rho_hyc = 0
R = 8.314
P = self.P * 6.894757 * 0.001 # convert Pressure from Psi to Mpa
Tabs = self.T + 273.15
Ppr = P / (4.892 - 0.4048 * self.rho_g)
Tpr = Tabs / (94.72 + 170.75 * self.rho_g)
E1 = math.exp(-Ppr ** 1.2 / Tpr * (0.45 + 8 * (0.56 - 1 / Tpr) ** 2))
E = 0.109 * (3.85 - Tpr) ** 2 * E1
Z1 = 0.03 + 0.00527 * (3.5 - Tpr) ** 3
Z = Z1 * Ppr + 0.642 * Tpr - 0.007 * Tpr ** 4 - 0.52 + E
rho_hyc += 28.8 * self.rho_g * P / (Z * R * Tabs)
F = -1.2 * Ppr ** 0.2 / Tpr * (0.45 + 8 * (0.56 - 1 / Tpr)) * E1
dz_dp = Z1 + 0.109 * (3.85 - Tpr) ** 2 * F
yo = 0.85 + 5.6 / (Ppr + 2) + 27.1 / (Ppr + 3.5) ** 2 - (8.7 * math.exp(-0.65 * (Ppr + 1)))
k_hyc += P / (1 - (Ppr / Z * dz_dp)) * yo / 1000
# print('Bulk modulus(GPa) and Density(g/cc) of initial fluid (gas)')
<filename>tests/st/ops/cpu/test_resize_bilinear_op.py<gh_stars>1000+
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context, Tensor
from mindspore.ops import operations as P
from mindspore import nn
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class NetResizeBilinear(nn.Cell):
def __init__(self, size=None, align_corner=False):
super(NetResizeBilinear, self).__init__()
self.op = P.ResizeBilinear(size=size, align_corners=align_corner)
def construct(self, inputs):
return self.op(inputs)
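# Illustrative sketch, not one of the original test cases: the wrapper also
# exposes align_corners, which changes how corner pixels are sampled during
# interpolation. The 1x1x2x2 input below is an assumed toy tensor.
def example_resize_bilinear_align_corners():
    input_tensor = Tensor(np.array([[[[0.1, 0.2], [0.3, 0.4]]]]).astype(np.float32))
    resize_aligned = NetResizeBilinear((4, 4), align_corner=True)
    return resize_aligned(input_tensor)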
def test_resize_nn_grayscale_integer_ratio_half(datatype=np.float16):
input_tensor = Tensor(np.array(
[[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(datatype))
# larger h and w
resize_nn = NetResizeBilinear((9, 9))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1333, 0.1666, 0.2, 0.2333, 0.2666, 0.3, 0.3, 0.3],
[0.2, 0.2333, 0.2666, 0.2998, 0.3333, 0.3667, 0.4, 0.4, 0.4],
[0.2998, 0.3333, 0.3665, 0.4, 0.433, 0.4668, 0.5, 0.5, 0.5],
[0.4, 0.4333, 0.4666, 0.5, 0.533, 0.567, 0.6, 0.6, 0.6],
[0.5, 0.533, 0.5664, 0.6, 0.6333, 0.667, 0.7, 0.7, 0.7],
[0.6, 0.6333, 0.6665, 0.6997, 0.733, 0.7666, 0.8, 0.8, 0.8],
[0.7, 0.7334, 0.7666, 0.8, 0.833, 0.8667, 0.9, 0.9, 0.9],
[0.7, 0.7334, 0.7666, 0.8, 0.833, 0.8667, 0.9, 0.9, 0.9],
[0.7, 0.7334, 0.7666, 0.8, 0.833, 0.8667, 0.9, 0.9, 0.9]]]]
).astype(np.float16))
error = np.ones(shape=[9, 9]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h and w
resize_nn = NetResizeBilinear((1, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1]]]]).astype(np.float16))
error = np.ones(shape=[1, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, larger w
resize_nn = NetResizeBilinear((1, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.1499, 0.2, 0.25, 0.3, 0.3]]]]).astype(np.float16))
error = np.ones(shape=[1, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, smaller w
resize_nn = NetResizeBilinear((6, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1],
[0.2499],
[0.4],
[0.55],
[0.7],
[0.7]]]]).astype(np.float16))
error = np.ones(shape=[6, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, same w
resize_nn = NetResizeBilinear((1, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.2, 0.3]]]]).astype(np.float16))
error = np.ones(shape=[1, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, same w
resize_nn = NetResizeBilinear((6, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3],
[0.2499, 0.35, 0.4502],
[0.4, 0.5, 0.6],
[0.55, 0.65, 0.75],
[0.7, 0.8, 0.9],
[0.7, 0.8, 0.9]]]]).astype(np.float16))
error = np.ones(shape=[6, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, smaller w
resize_nn = NetResizeBilinear((3, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1],
[0.4],
[0.7]]]]).astype(np.float16))
error = np.ones(shape=[3, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, larger w
resize_nn = NetResizeBilinear((3, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1499, 0.2, 0.25, 0.3, 0.3],
[0.4, 0.45, 0.5, 0.55, 0.6, 0.6],
[0.7, 0.75, 0.8, 0.8496, 0.9, 0.9]]]]).astype(np.float16))
error = np.ones(shape=[3, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same w, same h (identity)
resize_nn = NetResizeBilinear((3, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array(
[[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9]]]]).astype(np.float16))
error = np.ones(shape=[3, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_integer_ratio_float(datatype=np.float32):
input_tensor = Tensor(np.array(
[[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(datatype))
# larger h and w
resize_nn = NetResizeBilinear((9, 9))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.13333334, 0.16666667, 0.2, 0.23333335, 0.26666668, 0.3, 0.3, 0.3],
[0.20000002, 0.23333335, 0.26666668, 0.3, 0.33333337, 0.3666667, 0.40000004,
0.40000004, 0.40000004],
[0.3, 0.33333337, 0.36666667, 0.40000004, 0.43333337, 0.4666667, 0.5, 0.5,
0.5],
[0.4, 0.43333334, 0.46666667, 0.5, 0.53333336, 0.5666667, 0.6, 0.6, 0.6],
[0.5, 0.53333336, 0.56666666, 0.6, 0.6333333, 0.66666675, 0.70000005,
0.70000005, 0.70000005],
[0.6, 0.6333334, 0.6666667, 0.70000005, 0.73333335, 0.7666667, 0.8, 0.8, 0.8],
[0.7, 0.73333335, 0.76666665, 0.8, 0.8333333, 0.8666667, 0.9, 0.9, 0.9],
[0.7, 0.73333335, 0.76666665, 0.8, 0.8333333, 0.8666667, 0.9, 0.9, 0.9],
[0.7, 0.73333335, 0.76666665, 0.8, 0.8333333, 0.8666667, 0.9, 0.9,
0.9]]]]).astype(np.float32))
error = np.ones(shape=[9, 9]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h and w
resize_nn = NetResizeBilinear((1, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1]]]]).astype(np.float32))
error = np.ones(shape=[1, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, larger w
resize_nn = NetResizeBilinear((1, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.15, 0.2, 0.25, 0.3, 0.3]]]]).astype(np.float32))
error = np.ones(shape=[1, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, smaller w
resize_nn = NetResizeBilinear((6, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1], [0.25], [0.4], [0.55], [0.7], [0.7]]]]).astype(np.float32))
error = np.ones(shape=[6, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, same w
resize_nn = NetResizeBilinear((1, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.2, 0.3]]]]).astype(np.float32))
error = np.ones(shape=[1, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, same w
resize_nn = NetResizeBilinear((6, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3],
[0.25, 0.35000002, 0.45000002],
[0.4, 0.5, 0.6],
[0.55, 0.65, 0.75],
[0.7, 0.8, 0.9],
[0.7, 0.8, 0.9]]]]).astype(np.float32))
error = np.ones(shape=[6, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, smaller w
resize_nn = NetResizeBilinear((3, 1))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1], [0.4], [0.7]]]]).astype(np.float32))
error = np.ones(shape=[3, 1]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same h, larger w
resize_nn = NetResizeBilinear((3, 6))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.15, 0.2, 0.25, 0.3, 0.3],
[0.4, 0.45, 0.5, 0.55, 0.6, 0.6],
[0.7, 0.75, 0.8, 0.85, 0.9, 0.9]]]]).astype(np.float32))
error = np.ones(shape=[3, 6]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# same w, same h (identity)
resize_nn = NetResizeBilinear((3, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array(
[[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(np.float32))
error = np.ones(shape=[3, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
def test_resize_nn_grayscale_not_integer_ratio_half(datatype=np.float16):
input_tensor = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.5, 0.6, 0.7, 0.8],
[0.9, 0.0, 0.1, 0.2]]]]).astype(datatype))
# larger h and w
resize_nn = NetResizeBilinear((7, 7))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1571, 0.2142, 0.2715, 0.3286, 0.3857, 0.4],
[0.2715, 0.3286, 0.3857, 0.4429, 0.5, 0.557, 0.5713],
[0.4429, 0.5, 0.557, 0.6143, 0.6714, 0.7285, 0.7427],
[0.6143, 0.5083, 0.4429, 0.5005, 0.557, 0.6143, 0.6284],
[0.7856, 0.4346, 0.1855, 0.2429, 0.2998, 0.357, 0.3716],
[0.9, 0.3857, 0.01428, 0.0714, 0.1285, 0.1857, 0.2],
[0.9, 0.3857, 0.01428, 0.0714, 0.1285, 0.1857, 0.2]]]]).astype(np.float16))
error = np.ones(shape=[7, 7]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h and w
resize_nn = NetResizeBilinear((2, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(
np.array([[[[0.1, 0.2333, 0.3667],
[0.7, 0.3333, 0.4666]]]]).astype(np.float16))
error = np.ones(shape=[2, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, larger w
resize_nn = NetResizeBilinear((2, 7))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.1571, 0.2142, 0.2715, 0.3286, 0.3857, 0.4],
[0.7, 0.4714, 0.3142, 0.3716, 0.4285, 0.4856, 0.5]]]]).astype(np.float16))
error = np.ones(shape=[2, 7]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, smaller w
resize_nn = NetResizeBilinear((5, 3))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2333, 0.3667],
[0.3398, 0.4731, 0.6064],
[0.58, 0.513, 0.6465],
[0.82, 0.1533, 0.2866],
[0.9, 0.03333, 0.1666]]]]).astype(np.float16))
error = np.ones(shape=[5, 3]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# smaller h, same w
resize_nn = NetResizeBilinear((2, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.7, 0.3, 0.4001, 0.5]]]]).astype(np.float16))
error = np.ones(shape=[2, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
# larger h, same w
resize_nn = NetResizeBilinear((8, 4))
output = resize_nn(input_tensor)
expected_output = Tensor(np.array([[[[0.1, 0.2, 0.3, 0.4],
[0.2499, 0.35, 0.4502, 0.55],
[0.4, 0.5, 0.6, 0.6997],
[0.55, 0.525, 0.625, 0.7246],
[0.7, 0.3, 0.4001, 0.5],
[0.8496, 0.0752, 0.1753, 0.2754],
[0.9, 0., 0.1, 0.2],
[0.9, 0., 0.1, 0.2]]]]).astype(np.float16))
error = np.ones(shape=[8, 4]) * 1.0e-6
diff = output.asnumpy() - expected_output.asnumpy()
assert np.all(abs(diff) < error)
'1607865':{'en': 'Walton, NY'},
'1607869':{'en': 'Ovid, NY'},
'1607898':{'en': 'Groton, NY'},
'1607936':{'en': 'Corning, NY'},
'1607937':{'en': 'Corning, NY'},
'1607962':{'en': 'Corning, NY'},
'1607965':{'en': 'Edmeston, NY'},
'1607967':{'en': 'Bainbridge, NY'},
'1608':{'en': 'Wisconsin'},
'1608204':{'en': 'Madison, WI'},
'1608205':{'en': 'Stoughton, WI'},
'1608221':{'en': 'Madison, WI'},
'1608222':{'en': 'Madison, WI'},
'1608223':{'en': 'Madison, WI'},
'1608224':{'en': 'Madison, WI'},
'1608231':{'en': 'Madison, WI'},
'1608232':{'en': 'Madison, WI'},
'1608233':{'en': 'Madison, WI'},
'1608237':{'en': 'Madison, WI'},
'1608238':{'en': 'Madison, WI'},
'160824':{'en': 'Madison, WI'},
'160825':{'en': 'Madison, WI'},
'1608253':{'en': 'Wisconsin Dells, WI'},
'1608254':{'en': 'Wisconsin Dells, WI'},
'160826':{'en': 'Madison, WI'},
'1608269':{'en': 'Sparta, WI'},
'160827':{'en': 'Madison, WI'},
'1608280':{'en': 'Madison, WI'},
'1608287':{'en': 'Madison, WI'},
'1608296':{'en': 'Westfield, WI'},
'1608297':{'en': 'Montello, WI'},
'1608301':{'en': 'Madison, WI'},
'1608310':{'en': 'Madison, WI'},
'1608314':{'en': 'Janesville, WI'},
'1608323':{'en': 'Arcadia, WI'},
'1608324':{'en': 'Monroe, WI'},
'1608325':{'en': 'Monroe, WI'},
'1608326':{'en': '<NAME>, WI'},
'1608328':{'en': 'Monroe, WI'},
'1608329':{'en': 'Monroe, WI'},
'1608348':{'en': 'Platteville, WI'},
'1608355':{'en': 'Baraboo, WI'},
'1608356':{'en': 'Baraboo, WI'},
'1608361':{'en': 'Beloit, WI'},
'1608362':{'en': 'Beloit, WI'},
'1608363':{'en': 'Beloit, WI'},
'1608364':{'en': 'Beloit, WI'},
'1608365':{'en': 'Beloit, WI'},
'1608372':{'en': 'Tomah, WI'},
'1608374':{'en': 'Tomah, WI'},
'1608375':{'en': 'Boscobel, WI'},
'1608378':{'en': 'Warrens, WI'},
'1608392':{'en': 'La Crosse, WI'},
'1608417':{'en': 'Madison, WI'},
'1608423':{'en': 'Cambridge, WI'},
'1608424':{'en': 'Belleville, WI'},
'1608427':{'en': '<NAME>, WI'},
'1608429':{'en': 'Pardeeville, WI'},
'1608437':{'en': 'Mount Horeb, WI'},
'1608441':{'en': 'Madison, WI'},
'1608442':{'en': 'Madison, WI'},
'1608443':{'en': 'Madison, WI'},
'1608452':{'en': 'Coon Valley, WI'},
'1608462':{'en': 'Elroy, WI'},
'1608467':{'en': 'Madison, WI'},
'1608489':{'en': 'Hillsboro, WI'},
'1608524':{'en': 'Reedsburg, WI'},
'1608526':{'en': 'Holmen, WI'},
'1608527':{'en': 'New Glarus, WI'},
'1608562':{'en': 'New Lisbon, WI'},
'1608565':{'en': 'Necedah, WI'},
'1608582':{'en': 'Galesville, WI'},
'1608586':{'en': 'Oxford, WI'},
'1608588':{'en': 'Spring Green, WI'},
'1608592':{'en': 'Lodi, WI'},
'1608625':{'en': 'La Farge, WI'},
'1608630':{'en': 'Madison, WI'},
'1608634':{'en': 'Westby, WI'},
'1608635':{'en': 'Poynette, WI'},
'1608637':{'en': 'Viroqua, WI'},
'1608647':{'en': 'Richland Center, WI'},
'1608648':{'en': 'De Soto, WI'},
'1608654':{'en': 'Cashton, WI'},
'1608655':{'en': 'Marshall, WI'},
'1608661':{'en': 'Madison, WI'},
'1608662':{'en': 'Madison, WI'},
'1608663':{'en': 'Madison, WI'},
'1608676':{'en': 'Clinton, WI'},
'1608685':{'en': 'Alma, WI'},
'1608687':{'en': 'Fountain City, WI'},
'1608723':{'en': 'Lancaster, WI'},
'1608739':{'en': 'Muscoda, WI'},
'1608741':{'en': 'Janesville, WI'},
'1608742':{'en': 'Portage, WI'},
'1608743':{'en': 'Janesville, WI'},
'1608744':{'en': 'Cuba City, WI'},
'1608745':{'en': 'Portage, WI'},
'160875':{'en': 'Janesville, WI'},
'1608764':{'en': 'Deerfield, WI'},
'1608767':{'en': 'Black Earth, WI'},
'1608775':{'en': 'La Crosse, WI'},
'1608776':{'en': 'Darlington, WI'},
'1608779':{'en': 'Onalaska, WI'},
'1608782':{'en': 'La Crosse, WI'},
'1608783':{'en': 'Onalaska, WI'},
'1608784':{'en': 'La Crosse, WI'},
'1608785':{'en': 'La Crosse, WI'},
'1608786':{'en': 'West Salem, WI'},
'1608787':{'en': 'La Crosse, WI'},
'1608788':{'en': 'La Crosse, WI'},
'1608791':{'en': 'La Crosse, WI'},
'1608795':{'en': 'Mazomanie, WI'},
'1608796':{'en': 'La Crosse, WI'},
'1608798':{'en': 'Cross Plains, WI'},
'1608807':{'en': 'Madison, WI'},
'1608819':{'en': 'Madison, WI'},
'1608822':{'en': 'Fennimore, WI'},
'1608824':{'en': 'Madison, WI'},
'1608825':{'en': 'Sun Prairie, WI'},
'1608826':{'en': 'Madison, WI'},
'1608827':{'en': 'Madison, WI'},
'1608828':{'en': 'Madison, WI'},
'1608829':{'en': 'Madison, WI'},
'1608831':{'en': 'Middleton, WI'},
'1608833':{'en': 'Madison, WI'},
'1608834':{'en': 'Sun Prairie, WI'},
'1608835':{'en': 'Oregon, WI'},
'1608836':{'en': 'Middleton, WI'},
'1608837':{'en': 'Sun Prairie, WI'},
'1608838':{'en': 'McFarland, WI'},
'1608839':{'en': 'Cottage Grove, WI'},
'1608845':{'en': 'Verona, WI'},
'1608846':{'en': 'DeForest, WI'},
'1608847':{'en': 'Mauston, WI'},
'1608848':{'en': 'Verona, WI'},
'1608849':{'en': 'Waunakee, WI'},
'1608850':{'en': 'Waunakee, WI'},
'1608868':{'en': 'Milton, WI'},
'1608873':{'en': 'Stoughton, WI'},
'1608877':{'en': 'Stoughton, WI'},
'1608882':{'en': 'Evansville, WI'},
'1608884':{'en': 'Edgerton, WI'},
'1608897':{'en': 'Brodhead, WI'},
'1608924':{'en': 'Barneveld, WI'},
'1608930':{'en': 'Dodgeville, WI'},
'1608935':{'en': 'Dodgeville, WI'},
'1608938':{'en': 'Monticello, WI'},
'1608965':{'en': 'Shullsburg, WI'},
'1608987':{'en': 'Mineral Point, WI'},
'1608989':{'en': 'Blair, WI'},
'1608994':{'en': 'Bloomington, WI'},
'1609':{'en': 'New Jersey'},
'1609239':{'en': 'Burlington Township, NJ'},
'1609242':{'en': 'Forked River, NJ'},
'1609252':{'en': 'Princeton, NJ'},
'1609263':{'en': 'Sea Isle City, NJ'},
'1609266':{'en': 'Brigantine, NJ'},
'1609278':{'en': 'Trenton, NJ'},
'1609279':{'en': 'Princeton, NJ'},
'1609292':{'en': 'Trenton, NJ'},
'160934':{'en': 'Atlantic City, NJ'},
'1609368':{'en': 'Stone Harbor, NJ'},
'1609390':{'en': 'Marmora, NJ'},
'1609391':{'en': 'Ocean City, NJ'},
'1609392':{'en': 'Trenton, NJ'},
'1609393':{'en': 'Trenton, NJ'},
'1609394':{'en': 'Trenton, NJ'},
'1609396':{'en': 'Trenton, NJ'},
'1609397':{'en': 'Lambertville, NJ'},
'1609398':{'en': 'Ocean City, NJ'},
'1609399':{'en': 'Ocean City, NJ'},
'1609404':{'en': 'Galloway, NJ'},
'1609430':{'en': 'Princeton, NJ'},
'1609441':{'en': 'Atlantic City, NJ'},
'1609452':{'en': 'Princeton, NJ'},
'1609454':{'en': 'Princeton, NJ'},
'1609463':{'en': 'Cape May Ct Hse, NJ'},
'1609465':{'en': 'Cape May Ct Hse, NJ'},
'1609466':{'en': 'Hopewell, NJ'},
'1609492':{'en': 'Beach Haven, NJ'},
'1609497':{'en': 'Princeton, NJ'},
'1609514':{'en': 'Princeton, NJ'},
'1609520':{'en': 'Princeton, NJ'},
'1609522':{'en': 'Wildwood, NJ'},
'1609523':{'en': 'Wildwood, NJ'},
'1609530':{'en': 'Ewing Township, NJ'},
'1609538':{'en': 'Ewing Township, NJ'},
'1609561':{'en': 'Hammonton, NJ'},
'1609567':{'en': 'Hammonton, NJ'},
'1609572':{'en': 'Atlantic City, NJ'},
'160958':{'en': 'Trenton, NJ'},
'1609597':{'en': 'Manahawkin, NJ'},
'1609599':{'en': 'Trenton, NJ'},
'1609607':{'en': 'Barnegat Township, NJ'},
'1609625':{'en': 'Mays Landing, NJ'},
'1609628':{'en': 'Woodbine, NJ'},
'1609631':{'en': 'Trenton, NJ'},
'1609652':{'en': 'Galloway, NJ'},
'1609654':{'en': 'Medford, NJ'},
'1609656':{'en': 'Trenton, NJ'},
'1609660':{'en': 'Barnegat Township, NJ'},
'1609683':{'en': 'Princeton, NJ'},
'1609688':{'en': 'Princeton, NJ'},
'1609689':{'en': 'Trenton, NJ'},
'1609693':{'en': 'Forked River, NJ'},
'1609695':{'en': 'Trenton, NJ'},
'1609698':{'en': 'Barnegat Township, NJ'},
'1609704':{'en': 'Hammonton, NJ'},
'1609714':{'en': 'Medford, NJ'},
'1609729':{'en': 'Wildwood, NJ'},
'1609730':{'en': 'Pennington, NJ'},
'1609737':{'en': 'Pennington, NJ'},
'1609747':{'en': 'Burlington Township, NJ'},
'1609748':{'en': 'Galloway, NJ'},
'1609771':{'en': 'Ewing Township, NJ'},
'1609818':{'en': 'Pennington, NJ'},
'1609835':{'en': 'Willingboro, NJ'},
'1609838':{'en': 'Trenton, NJ'},
'1609844':{'en': 'Lawrenceville, NJ'},
'1609859':{'en': 'Southampton Township, NJ'},
'1609861':{'en': 'Woodbine, NJ'},
'1609871':{'en': 'Willingboro, NJ'},
'1609877':{'en': 'Willingboro, NJ'},
'1609882':{'en': 'Ewing Township, NJ'},
'1609883':{'en': 'Ewing Township, NJ'},
'1609884':{'en': '<NAME>, NJ'},
'1609888':{'en': 'Trenton, NJ'},
'1609890':{'en': 'Trenton, NJ'},
'1609893':{'en': 'Browns Mills, NJ'},
'1609894':{'en': 'Pemberton, NJ'},
'1609895':{'en': 'Lawrenceville, NJ'},
'1609896':{'en': 'Lawrenceville, NJ'},
'1609898':{'en': 'C<NAME>, NJ'},
'1609909':{'en': 'Mays Landing, NJ'},
'1609919':{'en': 'Princeton, NJ'},
'1609921':{'en': 'Princeton, NJ'},
'1609924':{'en': 'Princeton, NJ'},
'1609951':{'en': 'Princeton, NJ'},
'1609953':{'en': 'Medford, NJ'},
'1609965':{'en': 'Egg Harbor City, NJ'},
'1609967':{'en': 'Avalon, NJ'},
'1609971':{'en': 'Forked River, NJ'},
'1609978':{'en': 'Manahawkin, NJ'},
'1609987':{'en': 'Princeton, NJ'},
'1609989':{'en': 'Trenton, NJ'},
'1610':{'en': 'Pennsylvania'},
'1610208':{'en': 'Reading, PA'},
'1610225':{'en': 'Wayne, PA'},
'1610237':{'en': 'Darby, PA'},
'1610250':{'en': 'Easton, PA'},
'1610252':{'en': 'Easton, PA'},
'1610253':{'en': 'Easton, PA'},
'1610255':{'en': 'Landenberg, PA'},
'1610258':{'en': 'Easton, PA'},
'1610261':{'en': 'Northampton, PA'},
'1610262':{'en': 'Northampton, PA'},
'1610265':{'en': 'King of Prussia, PA'},
'1610268':{'en': 'Avondale, PA'},
'1610269':{'en': 'Downingtown, PA'},
'161027':{'en': 'Norristown, PA'},
'1610273':{'en': 'Honey Brook, PA'},
'1610280':{'en': 'Exton, PA'},
'1610282':{'en': 'Coopersburg, PA'},
'1610287':{'en': 'Schwenksville, PA'},
'1610293':{'en': 'Wayne, PA'},
'1610298':{'en': 'New Tripoli, PA'},
'1610317':{'en': 'Bethlehem, PA'},
'1610323':{'en': 'Pottstown, PA'},
'1610326':{'en': 'Pottstown, PA'},
'1610327':{'en': 'Pottstown, PA'},
'1610328':{'en': 'Springfield, PA'},
'1610330':{'en': 'Easton, PA'},
'1610336':{'en': 'Allentown, PA'},
'1610337':{'en': 'King of Prussia, PA'},
'1610344':{'en': 'West Chester, PA'},
'1610345':{'en': 'West Grove, PA'},
'1610347':{'en': 'Kennett Square, PA'},
'1610351':{'en': 'Allentown, PA'},
'1610352':{'en': 'Upper Darby, PA'},
'1610354':{'en': 'King of Prussia, PA'},
'1610363':{'en': 'Exton, PA'},
'1610366':{'en': 'Allentown, PA'},
'1610367':{'en': 'Boyertown, PA'},
'1610369':{'en': 'Boyertown, PA'},
'161037':{'en': 'Reading, PA'},
'1610377':{'en': 'Lehighton, PA'},
'1610380':{'en': 'Coatesville, PA'},
'1610383':{'en': 'Coatesville, PA'},
'1610384':{'en': 'Coatesville, PA'},
'1610385':{'en': 'Douglassville, PA'},
'1610388':{'en': '<NAME>, PA'},
'1610391':{'en': 'Allentown, PA'},
'1610395':{'en': 'Allentown, PA'},
'1610398':{'en': 'Allentown, PA'},
'1610399':{'en': 'West Chester, PA'},
'1610402':{'en': 'Allentown, PA'},
'1610404':{'en': 'Birdsboro, PA'},
'1610409':{'en': 'Collegeville, PA'},
'1610415':{'en': 'Phoenixville, PA'},
'1610419':{'en': 'Bethlehem, PA'},
'1610429':{'en': 'West Chester, PA'},
'161043':{'en': 'Allentown, PA'},
'1610430':{'en': 'West Chester, PA'},
'1610431':{'en': 'West Chester, PA'},
'1610436':{'en': 'West Chester, PA'},
'1610438':{'en': 'Easton, PA'},
'1610444':{'en': 'Kennett Square, PA'},
'1610446':{'en': 'Havertown, PA'},
'1610447':{'en': 'Chester, PA'},
'1610449':{'en': 'Havertown, PA'},
'1610454':{'en': 'Collegeville, PA'},
'1610466':{'en': 'Coatesville, PA'},
'1610469':{'en': 'Pottstown, PA'},
'1610473':{'en': 'Boyertown, PA'},
'1610478':{'en': 'Reading, PA'},
'1610481':{'en': 'Allentown, PA'},
'1610488':{'en': 'Bernville, PA'},
'1610489':{'en': 'Collegeville, PA'},
'1610515':{'en': 'Easton, PA'},
'1610518':{'en': 'Downingtown, PA'},
'1610520':{'en': 'Bryn Mawr, PA'},
'1610524':{'en': 'Exton, PA'},
'1610525':{'en': '<NAME>, PA'},
'1610526':{'en': '<NAME>awr, PA'},
'1610527':{'en': 'Bryn Mawr, PA'},
'1610530':{'en': 'Allentown, PA'},
'1610543':{'en': 'Springfield, PA'},
'1610544':{'en': 'Springfield, PA'},
'1610559':{'en': 'Easton, PA'},
'1610562':{'en': 'Hamburg, PA'},
'1610565':{'en': 'Media, PA'},
'1610566':{'en': 'Media, PA'},
'1610582':{'en': 'Birdsboro, PA'},
'1610588':{'en': 'Bangor, PA'},
'1610589':{'en': 'Womelsdorf, PA'},
'1610594':{'en': 'Exton, PA'},
'1610599':{'en': 'Bangor, PA'},
'1610619':{'en': 'Chester, PA'},
'1610625':{'en': 'Bethlehem, PA'},
'1610627':{'en': 'Media, PA'},
'1610628':{'en': 'Allentown, PA'},
'1610645':{'en': 'Wynnewood, PA'},
'1610648':{'en': 'Paoli, PA'},
'1610655':{'en': 'Reading, PA'},
'1610660':{'en': 'Bala Cynwyd, PA'},
'1610667':{'en': 'Bala Cynwyd, PA'},
'1610668':{'en': 'Bala Cynwyd, PA'},
'1610683':{'en': 'Kutztown, PA'},
'1610685':{'en': 'Reading, PA'},
'1610687':{'en': 'Wayne, PA'},
'1610688':{'en': 'Wayne, PA'},
'1610690':{'en': 'Springfield, PA'},
'1610691':{'en': 'Bethlehem, PA'},
'1610692':{'en': 'West Chester, PA'},
'1610693':{'en': 'Robesonia, PA'},
'1610694':{'en': 'Bethlehem, PA'},
'1610696':{'en': 'West Chester, PA'},
'1610701':{'en': 'West Chester, PA'},
'1610705':{'en': 'Pottstown, PA'},
'1610718':{'en': 'Pottstown, PA'},
'1610734':{'en': 'Upper Darby, PA'},
'1610738':{'en': 'West Chester, PA'},
'1610740':{'en': 'Allentown, PA'},
'1610743':{'en': 'Reading, PA'},
'1610746':{'en': 'Nazareth, PA'},
'1610750':{'en': 'Reading, PA'},
'1610756':{'en': 'Kempton, PA'},
'1610758':{'en': 'Bethlehem, PA'},
'1610759':{'en': 'Nazareth, PA'},
'1610768':{'en': 'King of Prussia, PA'},
'1610770':{'en': 'Allentown, PA'},
'1610775':{'en': 'Reading, PA'},
'1610776':{'en': 'Allentown, PA'},
'1610779':{'en': 'Reading, PA'},
'1610782':{'en': 'Allentown, PA'},
'1610783':{'en': 'King of Prussia, PA'},
'1610791':{'en': 'Allentown, PA'},
'1610792':{'en': 'Royersford, PA'},
'1610793':{'en': 'West Chester, PA'},
'1610796':{'en': 'Reading, PA'},
'1610797':{'en': 'Allentown, PA'},
'1610807':{'en': 'Bethlehem, PA'},
'1610814':{'en': 'Bethlehem, PA'},
'1610820':{'en': 'Allentown, PA'},
'1610821':{'en': 'Allentown, PA'},
'1610826':{'en': 'Palmerton, PA'},
'1610827':{'en': 'Chester Springs, PA'},
'1610831':{'en': 'Collegeville, PA'},
'1610837':{'en': 'Bath, PA'},
'1610838':{'en': 'Hellertown, PA'},
'1610841':{'en': 'Allentown, PA'},
'1610847':{'en': 'Ottsville, PA'},
'1610853':{'en': 'Havertown, PA'},
'1610856':{'en': 'Mohnton, PA'},
'1610857':{'en': 'Parkesburg,
bad commit
return render(request, 'deploys/all_history.html', {
"deploy_summaries": [],
"filter_title": filter_title,
"pageIndex": index,
"pageSize": size,
"from_date": from_date,
"from_time": from_time,
"to_date": to_date,
"to_time": to_time,
"commit": commit,
"repo": repo,
"branch": branch,
"reverse_date": reverse_date,
"operator": operator,
'pageRange': range(0),
"prevPageIndex": 0,
"nextPageIndex": 0,
"query_string": query_string,
})
filter['pageIndex'] = index
filter['pageSize'] = size
result = deploys_helper.get_all(request, **filter)
deploy_summaries = _gen_deploy_summary(request, result['deploys'])
page_range, prevPageIndex, nextPageIndex = _compute_range(result['total'], index, size,
DEFAULT_TOTAL_PAGES)
return render(request, 'deploys/all_history.html', {
"deploy_summaries": deploy_summaries,
"filter_title": filter_title,
"pageIndex": index,
"pageSize": size,
"from_date": from_date,
"from_time": from_time,
"to_date": to_date,
"to_time": to_time,
"commit": commit,
"repo": repo,
"branch": branch,
"reverse_date": reverse_date,
"operator": operator,
'pageRange': page_range,
"prevPageIndex": prevPageIndex,
"nextPageIndex": nextPageIndex,
"query_string": query_string,
})
def get_env_deploys(request, name, stage):
envs = environs_helper.get_all_env_stages(request, name)
stages, env = common.get_all_stages(envs, stage)
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_PAGE_SIZE))
from_date = request.GET.get('from_date', None)
from_time = request.GET.get('from_time', None)
to_date = request.GET.get('to_date', None)
to_time = request.GET.get('to_time', None)
commit = request.GET.get('commit', None)
repo = request.GET.get('repo', None)
branch = request.GET.get('branch', None)
reverse_date = request.GET.get('reverse_date', None)
operator = request.GET.get('operator', None)
filter, filter_title, query_string = \
_gen_deploy_query_filter(request, from_date, from_time, to_date, to_time, size,
reverse_date, operator, commit, repo, branch)
if filter is None:
return render(request, 'environs/env_history.html', {
"envs": envs,
"env": env,
"stages": stages,
"deploy_summaries": [],
"filter_title": filter_title,
"pageIndex": index,
"pageSize": size,
"from_date": from_date,
"from_time": from_time,
"to_date": to_date,
"to_time": to_time,
"commit": commit,
"repo": repo,
"branch": branch,
"reverse_date": reverse_date,
"operator": operator,
'pageRange': range(0),
"prevPageIndex": 0,
"nextPageIndex": 0,
"query_string": query_string,
"pinterest": IS_PINTEREST
})
filter['envId'] = [env['id']]
filter['pageIndex'] = index
filter['pageSize'] = size
result = deploys_helper.get_all(request, **filter)
deploy_summaries = _gen_deploy_summary(request, result['deploys'], for_env=env)
page_range, prevPageIndex, nextPageIndex = _compute_range(result['total'], index, size,
DEFAULT_TOTAL_PAGES)
return render(request, 'environs/env_history.html', {
"envs": envs,
"env": env,
"stages": stages,
"deploy_summaries": deploy_summaries,
"filter_title": filter_title,
"pageIndex": index,
"pageSize": size,
"from_date": from_date,
"from_time": from_time,
"to_date": to_date,
"to_time": to_time,
"commit": commit,
"repo": repo,
"branch": branch,
"reverse_date": reverse_date,
"operator": operator,
'pageRange': page_range,
"prevPageIndex": prevPageIndex,
"nextPageIndex": nextPageIndex,
"query_string": query_string,
"pinterest": IS_PINTEREST
})
def get_env_names(request):
# TODO create a loop to get all names
max_size = 10000
names = environs_helper.get_all_env_names(request, index=1, size=max_size)
return HttpResponse(json.dumps(names), content_type="application/json")
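# Sketch for the TODO above: page through environs_helper.get_all_env_names
# instead of relying on a single oversized request. The helper name and its
# index/size keywords come from the calls in this module; the loop itself is
# an illustrative assumption.
def _get_all_env_names_paged(request, page_size=1000):
    names, index = [], 1
    while True:
        batch = environs_helper.get_all_env_names(request, index=index, size=page_size)
        names.extend(batch)
        if len(batch) < page_size:
            return names
        index += 1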
def search_envs(request, filter):
max_size = 10000
names = environs_helper.get_all_env_names(request, name_filter=filter, index=1, size=max_size)
if not names:
return redirect('/envs/')
if len(names) == 1:
return redirect('/env/%s/' % names[0])
envs_tag = tags_helper.get_latest_by_targe_id(request, 'TELETRAAN')
return render(request, 'environs/envs_landing.html', {
"names": names,
"pageIndex": 1,
"pageSize": DEFAULT_PAGE_SIZE,
"disablePrevious": True,
"disableNext": True,
"envs_tag": envs_tag,
})
def post_create_env(request):
# TODO how to validate envName
data = request.POST
env_name = data["env_name"]
stage_name = data["stage_name"]
clone_env_name = data.get("clone_env_name")
clone_stage_name = data.get("clone_stage_name")
description = data.get('description')
if clone_env_name and clone_stage_name:
common.clone_from_stage_name(request, env_name, stage_name, clone_env_name,
clone_stage_name, description)
else:
data = {}
data['envName'] = env_name
data['stageName'] = stage_name
data['description'] = description
environs_helper.create_env(request, data)
return redirect('/env/' + env_name + '/' + stage_name + '/config/')
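# Sketch for the "how to validate envName" TODO above: a conservative check
# that could run before calling the backend. The allowed pattern here is an
# assumption, not a rule taken from the backend service.
import re

_ENV_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$')

def _is_valid_env_name(name):
    """Accept alphanumerics, '-' and '_', starting with an alphanumeric."""
    return bool(name) and bool(_ENV_NAME_RE.match(name))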
class EnvNewDeployView(View):
def get(self, request, name, stage):
env = environs_helper.get_env_by_stage(request, name, stage)
env_promote = environs_helper.get_env_promotes_config(request, name, stage)
current_build = None
if 'deployId' in env and env['deployId']:
deploy = deploys_helper.get(request, env['deployId'])
current_build = builds_helper.get_build(request, deploy['buildId'])
return render(request, 'deploys/new_deploy.html', {
"env": env,
"env_promote": env_promote,
"buildName": env['buildName'],
"current_build": current_build,
"pageIndex": 1,
"pageSize": common.DEFAULT_BUILD_SIZE,
})
def post(self, request, name, stage):
common.deploy(request, name, stage)
if name == 'ngapp2-A' or name == 'ngapp2-B':
return redirect("/env/ngapp2/deploy/?stage=2")
return redirect('/env/%s/%s/deploy' % (name, stage))
def post_add_stage(request, name):
# TODO how to validate stage name
data = request.POST
stage = data.get("stage")
from_stage = data.get("from_stage")
description = data.get("description")
if from_stage:
common.clone_from_stage_name(request, name, stage, name, from_stage, description)
else:
data = {}
data['envName'] = name
data['stageName'] = stage
data['description'] = description
environs_helper.create_env(request, data)
return redirect('/env/' + name + '/' + stage + '/config/')
def remove_stage(request, name, stage):
# TODO so we need to make sure the capacity is empty???
environs_helper.delete_env(request, name, stage)
envs = environs_helper.get_all_env_stages(request, name)
response = redirect('/env/' + name)
if len(envs) == 0:
cookie_response = removeEnvCookie(request, name)
if not cookie_response:
response.delete_cookie(ENV_COOKIE_NAME)
else:
response.set_cookie(ENV_COOKIE_NAME, cookie_response)
return response
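# Sketch for the capacity question in the TODO above: one conservative option
# is to refuse deletion while the stage still has groups attached.
# common.get_env_groups is used elsewhere in this module; treating a non-empty
# result as "still has capacity" is an assumption.
def _stage_has_capacity(request, name, stage):
    groups = common.get_env_groups(request, name, stage)
    return bool(groups)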
def get_builds(request, name, stage):
env = environs_helper.get_env_by_stage(request, name, stage)
env_promote = environs_helper.get_env_promotes_config(request, name, stage)
show_lock = False
if env_promote['type'] == 'AUTO' and env_promote['predStage'] and \
env_promote['predStage'] == environs_helper.BUILD_STAGE:
show_lock = True
    if 'buildName' not in env or not env['buildName']:
html = render_to_string('builds/simple_builds.tmpl', {
"builds": [],
"env": env,
"show_lock": show_lock,
})
return HttpResponse(html)
current_publish_date = 0
if 'deployId' in env and env['deployId']:
deploy = deploys_helper.get(request, env['deployId'])
build = builds_helper.get_build(request, deploy['buildId'])
current_publish_date = build['publishDate']
# return only the new builds
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', common.DEFAULT_BUILD_SIZE))
builds = builds_helper.get_builds_and_tags(request, name=env['buildName'], pageIndex=index,
pageSize=size)
new_builds = []
for build in builds:
if build['build']['publishDate'] > current_publish_date:
new_builds.append(build)
html = render_to_string('builds/simple_builds.tmpl', {
"builds": new_builds,
"current_publish_date": current_publish_date,
"env": env,
"show_lock": show_lock,
})
return HttpResponse(html)
def upload_private_build(request, name, stage):
return private_builds_helper.handle_uploaded_build(request, request.FILES['file'], name, stage)
def get_groups(request, name, stage):
groups = common.get_env_groups(request, name, stage)
html = render_to_string('groups/simple_groups.tmpl', {
"groups": groups,
})
return HttpResponse(html)
def deploy_build(request, name, stage, build_id):
env = environs_helper.get_env_by_stage(request, name, stage)
current_build = None
deploy_state = None
if env.get('deployId'):
current_deploy = deploys_helper.get(request, env['deployId'])
current_build = builds_helper.get_build(request, current_deploy['buildId'])
        deploy_state = current_deploy['state']
build = builds_helper.get_build_and_tag(request, build_id)
builds = [build]
scm_url = systems_helper.get_scm_url(request)
html = render_to_string('deploys/deploy_build.html', {
"env": env,
"builds": builds,
"current_build": current_build,
"scm_url": scm_url,
"buildName": env.get('buildName'),
"branch": env.get('branch'),
"csrf_token": get_token(request),
"deployState": deploy_state,
"overridePolicy": env.get('overridePolicy'),
})
return HttpResponse(html)
def deploy_commit(request, name, stage, commit):
env = environs_helper.get_env_by_stage(request, name, stage)
builds = builds_helper.get_builds_and_tags(request, commit=commit)
current_build = None
if env.get('deployId'):
deploy = deploys_helper.get(request, env['deployId'])
current_build = builds_helper.get_build(request, deploy['buildId'])
scm_url = systems_helper.get_scm_url(request)
html = render_to_string('deploys/deploy_build.html', {
"env": env,
"builds": builds,
"current_build": current_build,
"scm_url": scm_url,
"buildName": env.get('buildName'),
"branch": env.get('branch'),
"csrf_token": get_token(request),
})
return HttpResponse(html)
def promote_to(request, name, stage, deploy_id):
query_dict = request.POST
toStages = query_dict['toStages']
description = query_dict['description']
toStage = None
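    # promote the deploy to every requested stage; the redirect below uses the
    # last stage in the comma-separated list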
for toStage in toStages.split(','):
deploys_helper.promote(request, name, toStage, deploy_id, description)
return redirect('/env/%s/%s/deploy' % (name, toStage))
def restart(request, name, stage):
common.restart(request, name, stage)
return redirect('/env/%s/%s/deploy' % (name, stage))
def rollback_to(request, name, stage, deploy_id):
common.rollback_to(request, name, stage, deploy_id)
return redirect('/env/%s/%s/deploy' % (name, stage))
def rollback(request, name, stage):
query_dict = request.GET
to_deploy_id = query_dict.get('to_deploy_id', None)
envs = environs_helper.get_all_env_stages(request, name)
stages, env = common.get_all_stages(envs, stage)
result = deploys_helper.get_all(request, envId=[env['id']], pageIndex=1,
pageSize=DEFAULT_ROLLBACK_DEPLOY_NUM)
deploys = result.get("deploys")
# remove the first deploy if exists
if deploys:
deploys.pop(0)
# append the build info
deploy_summaries = []
branch = None
commit = None
build_id = None
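    # walk recent deploys; if no target was given, default to the most recent
    # SUCCEEDED deploy and remember its branch/commit/build for the template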
for deploy in deploys:
build_info = builds_helper.get_build_and_tag(request, deploy['buildId'])
build = build_info["build"]
tag = build_info.get("tag", None)
summary = {}
summary['deploy'] = deploy
summary['build'] = build
summary['tag'] = tag
if not to_deploy_id and deploy['state'] == 'SUCCEEDED':
to_deploy_id = deploy['id']
if to_deploy_id and to_deploy_id == deploy['id']:
branch = build['branch']
commit = build['commitShort']
build_id = build['id']
deploy_summaries.append(summary)
html = render_to_string("environs/env_rollback.html", {
"envs": envs,
"stages": stages,
"envs": envs,
"env": env,
"deploy_summaries": deploy_summaries,
"to_deploy_id": to_deploy_id,
"branch": branch,
"commit": commit,
"build_id": build_id,
"csrf_token": get_token(request),
})
return HttpResponse(html)
def get_deploy(request, name, stage, deploy_id):
deploy = deploys_helper.get(request, deploy_id)
build = builds_helper.get_build(request, deploy['buildId'])
env = environs_helper.get_env_by_stage(request, name, stage)
return render(request, 'environs/env_deploy_details.html', {
"deploy": deploy,
"csrf_token": get_token(request),
"build": build,
"env": env,
})
def promote(request, name, stage, deploy_id):
envs = environs_helper.get_all_env_stages(request, name)
stages, env = common.get_all_stages(envs, stage)
env_wrappers = []
for temp_env in envs:
env_wrapper = {}
env_wrapper["env"] = temp_env
env_wrapper["env_promote"] = environs_helper.get_env_promotes_config(request,
temp_env['envName'],
temp_env['stageName'])
env_wrappers.append(env_wrapper)
deploy = deploys_helper.get(request, deploy_id)
build = builds_helper.get_build(request, deploy['buildId'])
html = render_to_string("environs/env_promote.html", {
"envs": envs,
"stages": stages,
"envs": envs,
"env": env,
"env_wrappers": env_wrappers,
"deploy": deploy,
"build": build,
"csrf_token": get_token(request),
})
return HttpResponse(html)
def pause(request, name, stage):
deploys_helper.pause(request, name, stage)
return redirect('/env/%s/%s/deploy' % (name, stage))
def resume(request, name, stage):
deploys_helper.resume(request, name, stage)
return redirect('/env/%s/%s/deploy' % (name, stage))
def enable_env_change(request, name, stage):
params = request.POST
description = params.get('description')
environs_helper.enable_env_changes(request, name, stage, description)
return redirect('/env/%s/%s/deploy' % (name, stage))
def disable_env_change(request, name, stage):
params = request.POST
description = params.get('description')
environs_helper.disable_env_changes(request, name, stage, description)
return redirect('/env/%s/%s/deploy' % (name, stage))
def enable_all_env_change(request):
params = request.POST
description = params.get('description')
environs_helper.enable_all_env_changes(request, description)
return redirect('/envs/')
def disable_all_env_change(request):
params = request.POST
description = params.get('description')
environs_helper.disable_all_env_changes(request, description)
return redirect('/envs/')
# get all reachable hosts
def get_hosts(request, name, stage):
envs = environs_helper.get_all_env_stages(request, name)
stages, env = common.get_all_stages(envs, stage)
agents = agents_helper.get_agents(request, env['envName'], env['stageName'])
title = "All hosts"
agents_wrapper = {}
for agent in agents:
if agent['deployId'] not in agents_wrapper:
agents_wrapper[agent['deployId']] = []
agents_wrapper[agent['deployId']].append(agent)
return render(request, 'environs/env_hosts.html', {
"envs": envs,
"env": env,
"stages": stages,
"agents_wrapper": agents_wrapper,
"title": title,
})
# get total alive hosts (hostStage == -1000)
# get alive hosts