id | text | dataset_id
stringlengths 2-8 | stringlengths 16-264k | stringclasses 1 value
---|---|---|
327975
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' I3Modules to add labels for deep learning
'''
from __future__ import print_function, division
import numpy as np
from icecube import dataclasses, icetray
from icecube.icetray.i3logging import log_error, log_warn
from ic3_labels.labels.base_module import MCLabelsBase
from ic3_labels.labels.utils import high_level as hl
from ic3_labels.labels.utils import muon as mu_utils
from ic3_labels.labels.utils import general
class MCLabelsDeepLearning(MCLabelsBase):
"""Creates extensive Muon, primary and misc Labels.
"""
def __init__(self, context):
# super(MCLabelsDeepLearning, self).__init__(self, context)
MCLabelsBase.__init__(self, context)
self.AddParameter("IsMuonGun",
"Indicate whether this is a MuonGun dataset.", False)
def Configure(self):
# super(MCLabelsDeepLearning, self).Configure(self)
MCLabelsBase.Configure(self)
self._is_muongun = self.GetParameter("IsMuonGun")
def Physics(self, frame):
labels = hl.get_labels(frame=frame,
convex_hull=self._convex_hull,
domPosDict=self._dom_pos_dict,
primary=frame[self._primary_key],
pulse_map_string=self._pulse_map_string,
mcpe_series_map_name=self._mcpe_series_map_name,
is_muongun=self._is_muongun)
# write to frame
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsTau(MCLabelsBase):
def Physics(self, frame):
labels = hl.get_tau_labels(
frame=frame,
convex_hull=self._convex_hull)
# write to frame
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsCascadeParameters(MCLabelsBase):
def Physics(self, frame):
labels = hl.get_cascade_parameters(frame=frame,
primary=frame[self._primary_key],
convex_hull=self._convex_hull,
extend_boundary=500)
# write to frame
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsCascades(MCLabelsBase):
def __init__(self, context):
# super(MCLabelsCascades, self).__init__(self, context)
MCLabelsBase.__init__(self, context)
self.AddParameter("ExtendBoundary",
"Extend boundary of convex hull [in meters].",
0)
def Configure(self):
# super(MCLabelsCascades, self).Configure(self)
MCLabelsBase.Configure(self)
self._extend_boundary = self.GetParameter("ExtendBoundary")
def Physics(self, frame):
labels = hl.get_cascade_labels(frame=frame,
primary=frame[self._primary_key],
convex_hull=self._convex_hull,
extend_boundary=self._extend_boundary)
# write to frame
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsCorsikaMultiplicity(MCLabelsBase):
def Physics(self, frame):
labels = hl.get_muon_bundle_information(frame=frame,
convex_hull=self._convex_hull)
labels['num_coincident_events'] = \
general.get_num_coincident_events(frame)
primary = frame[self._primary_key]
labels['PrimaryEnergy'] = primary.energy
labels['PrimaryAzimuth'] = primary.dir.azimuth
labels['PrimaryZenith'] = primary.dir.zenith
labels['PrimaryDirectionX'] = primary.dir.x
labels['PrimaryDirectionY'] = primary.dir.y
labels['PrimaryDirectionZ'] = primary.dir.z
label_names = ['num_coincident_events', 'num_muons',
'num_muons_at_cyl', 'num_muons_at_cyl_above_threshold',
'num_muons_at_entry',
'num_muons_at_entry_above_threshold']
pid_names = ['p_is_coincident_event', 'p_is_muon_bundle',
'p_is_muon_bundle_at_cyl',
'p_is_muon_bundle_at_cyl_above_threshold',
'p_is_muon_bundle_at_entry',
'p_is_muon_bundle_at_entry_above_threshold']
for label, p_name in zip(label_names, pid_names):
labels[p_name] = labels[label] > 1
# write to frame
frame.Put(self._output_key, dataclasses.I3MapStringDouble(labels))
self.PushFrame(frame)
class MCLabelsCorsikaAzimuthExcess(MCLabelsBase):
def Physics(self, frame):
# create empty labelDict
labels = dataclasses.I3MapStringDouble()
muons_inside = mu_utils.get_muons_inside(frame, self._convex_hull)
labels['NoOfMuonsInside'] = len(muons_inside)
# get muons
mostEnergeticMuon = mu_utils.get_most_energetic_muon_inside(
frame, self._convex_hull,
muons_inside=muons_inside)
if mostEnergeticMuon is None:
labels['Muon_energy'] = np.nan
labels['Muon_vertexX'] = np.nan
labels['Muon_vertexY'] = np.nan
labels['Muon_vertexZ'] = np.nan
labels['Muon_vertexTime'] = np.nan
labels['Muon_azimuth'] = np.nan
labels['Muon_zenith'] = np.nan
else:
labels['Muon_energy'] = mostEnergeticMuon.energy
labels['Muon_vertexX'] = mostEnergeticMuon.pos.x
labels['Muon_vertexY'] = mostEnergeticMuon.pos.y
labels['Muon_vertexZ'] = mostEnergeticMuon.pos.z
labels['Muon_vertexTime'] = mostEnergeticMuon.time
labels['Muon_azimuth'] = mostEnergeticMuon.dir.azimuth
labels['Muon_zenith'] = mostEnergeticMuon.dir.zenith
# write to frame
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsMuonScattering(MCLabelsBase):
"""Creates labels to identify muon scattering.
"""
def __init__(self, context):
# super(MCLabelsDeepLearning, self).__init__(self, context)
MCLabelsBase.__init__(self, context)
self.AddParameter("MinLength",
"Minimum required track lenth inside detector to "
"qualify an event as a muon scattering event",
1000)
self.AddParameter("MinLengthBefore",
"Minimum required track lenth inside detector "
"before the energy loss to qualify an event as a "
"muon scattering event",
400)
self.AddParameter("MinLengthAfter",
"Minimum required track lenth inside detector "
"after the energy loss to qualify an event as a "
"muon scattering event",
400)
self.AddParameter("MinMuonEntryEnergy",
"Minimum required muon energy at point of entry "
"to qualify an event as a muon scattering event",
10000)
self.AddParameter("MinRelativeLossEnergy",
"Minimum required relative energy of the muon loss "
"to qualify an event as a muon scattering event. "
"The relative energy loss is calculated as the "
"loss energy / muon energy at entry",
0.5)
def Configure(self):
# super(MCLabelsDeepLearning, self).Configure(self)
MCLabelsBase.Configure(self)
self._min_length = self.GetParameter("MinLength")
self._min_length_before = self.GetParameter("MinLengthBefore")
self._min_length_after = self.GetParameter("MinLengthAfter")
self._min_muon_entry_energy = self.GetParameter("MinMuonEntryEnergy")
self._min_rel_loss_energy = self.GetParameter("MinRelativeLossEnergy")
def Physics(self, frame):
labels = mu_utils.get_muon_scattering_info(
frame=frame,
primary=frame[self._primary_key],
convex_hull=self._convex_hull,
min_length=self._min_length,
min_length_before=self._min_length_before,
min_length_after=self._min_length_after,
min_muon_entry_energy=self._min_muon_entry_energy,
min_rel_loss_energy=self._min_rel_loss_energy,
)
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsMuonEnergyLosses(MCLabelsBase):
def __init__(self, context):
MCLabelsBase.__init__(self, context)
self.AddParameter("MuonKey", "Name of the muon.", 'MCPrimary')
self.AddParameter("BinWidth", "Bin width [in meters].", 10)
self.AddParameter("ExtendBoundary",
"Extend boundary of convex hull [in meters].",
150)
self.AddParameter("IncludeUnderOverFlow",
"Include over and under flow bins.",
False)
self.AddParameter("ForceNumBins",
"Force number of bins to be this value."
"Will append zeros or remove last bins.",
None)
def Configure(self):
MCLabelsBase.Configure(self)
self._muon_key = self.GetParameter("MuonKey")
self._bin_width = self.GetParameter("BinWidth")
self._extend_boundary = self.GetParameter("ExtendBoundary")
self._force_num_bins = self.GetParameter("ForceNumBins")
self._include_under_over_flow = \
self.GetParameter("IncludeUnderOverFlow")
def Physics(self, frame):
labels = dataclasses.I3MapStringDouble()
binned_energy_losses = mu_utils.get_inf_muon_binned_energy_losses(
frame=frame,
convex_hull=self._convex_hull,
muon=frame[self._muon_key],
bin_width=self._bin_width,
extend_boundary=self._extend_boundary,
include_under_over_flow=self._include_under_over_flow,
)
# force the number of bins to match ForceNumBins
if self._force_num_bins is not None:
num_bins = len(binned_energy_losses)
# too many bins: remove last bins
if num_bins > self._force_num_bins:
binned_energy_losses = \
binned_energy_losses[:self._force_num_bins]
# too few bins: append zeros
elif num_bins < self._force_num_bins:
num_bins_to_add = self._force_num_bins - num_bins
# print('Appending {} zeros'.format(num_bins_to_add))
binned_energy_losses = np.concatenate((
binned_energy_losses, np.zeros(num_bins_to_add)))
# write to frame
for i, energy_i in enumerate(binned_energy_losses):
labels['EnergyLoss_{:04d}'.format(i)] = energy_i
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsMuonEnergyLossesInCylinder(MCLabelsBase):
def __init__(self, context):
MCLabelsBase.__init__(self, context)
self.AddParameter("BinWidth", "Bin width [in meters].", 15)
self.AddParameter("NumBins", "Number of bins to create.", 100)
self.AddParameter("CylinderHeight",
"The height (z) of the axial clinder [in meters].",
1000.)
self.AddParameter("CylinderRadius",
"The radius (x-y) of the axial clinder [in meters].",
600.)
def Configure(self):
MCLabelsBase.Configure(self)
self._bin_width = self.GetParameter("BinWidth")
self._num_bins = self.GetParameter("NumBins")
self._cylinder_height = self.GetParameter("CylinderHeight")
self._cylinder_radius = self.GetParameter("CylinderRadius")
def Physics(self, frame):
# get muon
muon = mu_utils.get_muon(
frame=frame,
primary=frame[self._primary_key],
convex_hull=self._convex_hull,
)
labels = dataclasses.I3MapStringDouble()
binned_energy_losses = mu_utils.get_binned_energy_losses_in_cylinder(
frame=frame,
muon=muon,
bin_width=self._bin_width,
num_bins=self._num_bins,
cylinder_height=self._cylinder_height,
cylinder_radius=self._cylinder_radius,
)
# write to frame
labels['track_anchor_x'] = muon.pos.x
labels['track_anchor_y'] = muon.pos.y
labels['track_anchor_z'] = muon.pos.z
labels['track_anchor_time'] = muon.time
labels['azimuth'] = muon.dir.azimuth
labels['zenith'] = muon.dir.zenith
for i, energy_i in enumerate(binned_energy_losses):
labels['EnergyLoss_{:05d}'.format(i)] = energy_i
frame.Put(self._output_key, labels)
self.PushFrame(frame)
class MCLabelsMuonEnergyLossesMillipede(MCLabelsBase):
def __init__(self, context):
MCLabelsBase.__init__(self, context)
self.AddParameter("BinWidth", "Bin width [in meters].", 15)
self.AddParameter("Boundary",
"Half edge length of a cube [in meters]. " +
"Will be used as a boundary." +
"Millipede default are 600m.",
600.)
self.AddParameter("WriteParticleVector",
"Also writes the labels in form of " +
"a particle vector to be visualized " +
"via steamshovel",
False)
self.AddParameter("MaxNumBins",
"If provided, exactly this number of bins is " +
"added to the labels. Non existing bins are " +
"padded with NaNs. Additional bins are cut off. " +
"This can be useful when writing tabular data " +
"that requires fixed sizes.",
None)
def Configure(self):
MCLabelsBase.Configure(self)
self._bin_width = self.GetParameter("BinWidth")
self._boundary = self.GetParameter("Boundary")
self._write_vector = self.GetParameter("WriteParticleVector")
self._max_num_bins = self.GetParameter("MaxNumBins")
def Physics(self, frame):
# get muon
muon = mu_utils.get_muon(
frame=frame,
primary=frame[self._primary_key],
convex_hull=self._convex_hull,
)
labels = dataclasses.I3MapStringDouble()
if self._write_vector:
binned_energy_losses, bin_center_pos = \
mu_utils.get_binned_energy_losses_in_cube(
frame=frame,
muon=muon,
bin_width=self._bin_width,
boundary=self._boundary,
return_bin_centers=self._write_vector
)
else:
binned_energy_losses = mu_utils.get_binned_energy_losses_in_cube(
frame=frame,
muon=muon,
bin_width=self._bin_width,
boundary=self._boundary,
return_bin_centers=self._write_vector
)
# write to frame
labels['track_anchor_x'] = muon.pos.x
labels['track_anchor_y'] = muon.pos.y
labels['track_anchor_z'] = muon.pos.z
labels['track_anchor_time'] = muon.time
labels['azimuth'] = muon.dir.azimuth
labels['zenith'] = muon.dir.zenith
for i, energy_i in enumerate(binned_energy_losses):
# stop adding energy losses if we reached the maximum
if self._max_num_bins is not None:
if i >= self._max_num_bins:
msg = 'MaxNumBins is set to {}. '.format(self._max_num_bins)
msg += 'Cutting off an additional {} losses!'.format(
len(binned_energy_losses) - self._max_num_bins)
log_warn(msg)
break
labels['EnergyLoss_{:05d}'.format(i)] = energy_i
# pad rest with NaNs
if self._max_num_bins is not None:
for i in range(len(binned_energy_losses), self._max_num_bins):
labels['EnergyLoss_{:05d}'.format(i)] = float('NaN')
frame.Put(self._output_key, labels)
if self._write_vector:
part_vec = dataclasses.I3VectorI3Particle()
for energy_i, pos_i in zip(binned_energy_losses, bin_center_pos):
part = dataclasses.I3Particle()
part.pos = dataclasses.I3Position(*pos_i)
part.energy = energy_i
part.dir = dataclasses.I3Direction(muon.dir)
part.time = (
(muon.pos - part.pos).magnitude / dataclasses.I3Constants.c
)
part_vec.append(part)
frame.Put(self._output_key + 'ParticleVector', part_vec)
self.PushFrame(frame)
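# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal example of how one of these label modules might be placed in an
# IceTray chain. The file names are hypothetical; only the "IsMuonGun"
# parameter is defined in this file, and any further parameters (output key,
# pulse map, ...) belong to MCLabelsBase and are not shown here.
if __name__ == '__main__':
    from I3Tray import I3Tray
    tray = I3Tray()
    tray.AddModule('I3Reader', 'reader',
                   FilenameList=['example_gcd.i3.gz', 'example_physics.i3.bz2'])
    tray.AddModule(MCLabelsDeepLearning, 'mc_labels_deep_learning',
                   IsMuonGun=False)
    tray.AddModule('I3Writer', 'writer', Filename='example_with_labels.i3.bz2')
    tray.Execute()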
|
StarcoderdataPython
|
1922087
|
""" The MIT License (MIT)
Copyright (c) 2016 <NAME>, University of Massachusetts
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import time
import ctypes as ct
import numpy as np
import csv
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))))
import file_loader as fl
import mdp_value_function as mvf
import nova_mdp as nm
class MDP(nm.NovaMDP):
""" A Markov Decision Process (MDP) object that can load, solve, and save.
Specifically, it is capable of loading raw and cassandra-like MDP files, provides
functionality to solve them using the nova library, and enables saving the resulting
policy as a raw policy file.
"""
def __init__(self):
""" The constructor for the MDP class. """
# Assign a nullptr for the device-side pointers and initial values for the structure variables.
self.n = int(0)
self.ns = int(0)
self.m = int(0)
self.gamma = float(0.9)
self.horizon = int(1)
self.epsilon = float(0.01)
self.s0 = int(0)
self.ng = int(0)
self.goals = ct.POINTER(ct.c_uint)()
self.S = ct.POINTER(ct.c_int)()
self.T = ct.POINTER(ct.c_float)()
self.R = ct.POINTER(ct.c_float)()
self.d_goals = ct.POINTER(ct.c_uint)()
self.d_S = ct.POINTER(ct.c_int)()
self.d_T = ct.POINTER(ct.c_float)()
self.d_R = ct.POINTER(ct.c_float)()
# Additional useful variables not in the structure.
self.Rmin = None
self.Rmax = None
self.cpuIsInitialized = False
self.gpuIsInitialized = False
def __del__(self):
""" The deconstructor for the MDP class. """
self.uninitialize_gpu()
self.uninitialize()
def initialize(self, n, ns, m, gamma, horizon, epsilon, s0, ng):
""" Initialize the MDP object, allocating array memory, given the parameters.
Parameters:
n -- The number of states.
ns -- The maximum number of successor states.
m -- The number of actions.
gamma -- The discount factor between 0 and 1.
horizon -- The positive integer for the horizon.
epsilon -- The convergence criterion for some algorithms.
s0 -- The initial state index (if an SSP MDP).
ng -- The positive integer for number of goals (if an SSP MDP) or 0 (otherwise).
"""
if self.cpuIsInitialized:
return
result = nm._nova.mdp_initialize_cpu(self, n, ns, m, gamma, horizon, epsilon, s0, ng)
if result != 0:
print("Failed to initialize the MDP.")
raise Exception()
self.cpuIsInitialized = True
def uninitialize(self):
""" Uninitialize the MDP object, freeing the allocated memory. """
if not self.cpuIsInitialized:
return
result = nm._nova.mdp_uninitialize_cpu(self)
if result != 0:
print("Failed to uninitialize the MDP.")
raise Exception()
self.cpuIsInitialized = False
def initialize_gpu(self):
""" Initialize the GPU variables. This only needs to be called if GPU algorithms are used. """
if self.gpuIsInitialized:
return
result = nm._nova.mdp_initialize_successors_gpu(self)
result += nm._nova.mdp_initialize_state_transitions_gpu(self)
result += nm._nova.mdp_initialize_rewards_gpu(self)
if self.ng > 0:
result += nm._nova.mdp_initialize_goals_gpu(self)
if result != 0:
print("Failed to initialize the 'nova' library's GPU variables for the MDP.")
raise Exception()
self.gpuIsInitialized = True
def uninitialize_gpu(self):
""" Uninitialize the GPU variables. This only needs to be called if GPU algorithms are used. """
if not self.gpuIsInitialized:
return
result = nm._nova.mdp_uninitialize_successors_gpu(self)
result += nm._nova.mdp_uninitialize_state_transitions_gpu(self)
result += nm._nova.mdp_uninitialize_rewards_gpu(self)
if self.ng > 0:
result += nm._nova.mdp_uninitialize_goals_gpu(self)
if result != 0:
print("Failed to uninitialize the 'nova' library's GPU variables for the MDP.")
raise Exception()
self.gpuIsInitialized = False
def load(self, filename, filetype='cassandra', scalarize=lambda x: x[0]):
""" Load a Multi-Objective POMDP file given the filename and optionally the file type.
Parameters:
filename -- The name and path of the file to load.
filetype -- Either 'cassandra' or 'raw'. Default is 'cassandra'.
scalarize -- Optionally define a scalarization function. Only used for 'raw' files.
Default returns the first reward.
"""
# Before anything, uninitialize the current MDP.
self.uninitialize_gpu()
self.uninitialize()
# Now load the file based on the desired file type.
fileLoader = fl.FileLoader()
if filetype == 'cassandra':
fileLoader.load_cassandra(filename)
elif filetype == 'raw':
fileLoader.load_raw_mdp(filename, scalarize)
else:
print("Invalid file type '%s'." % (filetype))
raise Exception()
# Allocate the memory on the C-side. Note: Allocating on the Python-side will create managed pointers.
self.initialize(fileLoader.n, fileLoader.ns, fileLoader.m,
fileLoader.gamma, fileLoader.horizon, fileLoader.epsilon,
fileLoader.s0, fileLoader.ng)
# Flatten all of the file loader data.
fileLoader.goals = fileLoader.goals.flatten()
fileLoader.S = fileLoader.S.flatten()
fileLoader.T = fileLoader.T.flatten()
fileLoader.R = fileLoader.R.flatten()
# Copy all of the variables' data into these arrays.
for i in range(self.ng):
self.goals[i] = fileLoader.goals[i]
for i in range(self.n * self.m * self.ns):
self.S[i] = fileLoader.S[i]
self.T[i] = fileLoader.T[i]
for i in range(self.n * self.m):
self.R[i] = fileLoader.R[i]
self.Rmin = fileLoader.Rmin
self.Rmax = fileLoader.Rmax
# TODO: REMOVE THIS. IT HAS BEEN REPLACED BY SEPARATE CLASSES.
def solve(self, algorithm='vi', process='gpu', numThreads=1024, heuristic=None):
""" Solve the MDP using the nova Python wrapper.
Parameters:
algorithm -- The algorithm to use, either 'vi', 'lao*', or 'rtdp'. Default is 'vi'.
process -- Use the 'cpu' or 'gpu'. If 'gpu' fails, it tries 'cpu'. Default is 'gpu'.
numThreads -- The number of CUDA threads to execute (multiple of 32). Default is 1024.
heuristic -- For 'lao*', this function or list maps state indexes to heuristic values.
Optional. Default value is None, yielding an n-array of zeros.
Returns:
V, pi, timing -- If algorithm is 'vi'.
r, S, V, pi, timing -- If algorithm is 'lao*'.
r -- The size of the valid states found by heuristic search algorithms (e.g., 'lao*').
S -- The actual r state indexes found by heuristic search algorithms (e.g., 'lao*').
V -- The values of each state, mapping states to values. In heuristic search, S contains the state index.
pi -- The policy, mapping states to actions. In heuristic search, S contains the state index.
timing -- A pair (wall-time, cpu-time) for solver execution time, not including (un)initialization.
"""
# Create V and pi, assigning them their respective initial values.
Vinitial = np.array([0.0 for s in range(self.n)])
if self.gamma < 1.0:
Vinitial = np.array([float(self.Rmin / (1.0 - self.gamma)) for s in range(self.n)])
# Create a ctypes array type to convert a flattened numpy array to a C array, then convert Vinitial.
array_type_n_float = ct.c_float * self.n
Vinitial = array_type_n_float(*Vinitial)
# For informed search algorithms, define the heuristic, which is stored in V initially.
if algorithm == 'lao*' and heuristic is not None:
self.V = array_type_n_float(*np.array([float(heuristic[s]) for s in range(self.n)]))
policy = ct.POINTER(mvf.MDPValueFunction)()
timing = None
# If the process is 'gpu', then attempt to solve it. If an error arises, then
# assign process to 'cpu' and attempt to solve it using that.
if process == 'gpu':
timing = (time.time(), time.clock())
if algorithm == 'vi':
result = nm._nova.mdp_vi_complete_gpu(self, int(numThreads), Vinitial,
ct.byref(policy))
elif algorithm == 'lao*':
result = nm._nova.ssp_lao_star_complete_gpu(self, int(numThreads), Vinitial,
ct.byref(policy))
elif algorithm == 'rtdp':
result = nm._nova.ssp_rtdp_complete_gpu(self, int(numThreads), Vinitial,
ct.byref(policy))
timing = (time.time() - timing[0], time.clock() - timing[1])
if result != 0:
print("Failed to execute the 'nova' library's GPU MDP solver.")
process = 'cpu'
# If the process is 'cpu', then attempt to solve it.
if process == 'cpu':
timing = (time.time(), time.clock())
if algorithm == 'vi':
result = nm._nova.mdp_vi_complete_cpu(self, Vinitial, ct.byref(policy))
elif algorithm == 'lao*':
result = nm._nova.ssp_lao_star_complete_cpu(self, Vinitial, ct.byref(policy))
elif algorithm == 'rtdp':
result = nm._nova.ssp_rtdp_complete_cpu(self, Vinitial, ct.byref(policy))
timing = (time.time() - timing[0], time.clock() - timing[1])
if result != 0:
print("Failed to execute the 'nova' library's CPU MDP solver.")
raise Exception()
# Dereference the pointer (this is how you do it in ctypes).
policy = policy.contents
return policy, timing
def __str__(self):
""" Return the string of the MDP values akin to the raw file format.
Returns:
The string of the MDP in a similar format as the raw file format.
"""
result = "n: " + str(self.n) + "\n"
result += "m: " + str(self.m) + "\n"
result += "ns: " + str(self.ns) + "\n"
result += "s0: " + str(self.s0) + "\n"
result += "goals: " + str([self.goals[i] for i in range(self.ng)]) + "\n"
result += "horizon: " + str(self.horizon) + "\n"
result += "gamma: " + str(self.gamma) + "\n\n"
result += "S(s, a, s'):\n%s" % (str(np.array([self.S[i] \
for i in range(self.n * self.m * self.ns)]).reshape((self.n, self.m, self.ns)))) + "\n\n"
result += "T(s, a, s'):\n%s" % (str(np.array([self.T[i] \
for i in range(self.n * self.m * self.ns)]).reshape((self.n, self.m, self.ns)))) + "\n\n"
result += "R(s, a):\n%s" % (str(np.array([self.R[i] \
for i in range(self.n * self.m)]).reshape((self.n, self.m)))) + "\n\n"
return result
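# --- Usage sketch (added for illustration, not part of the original file) ---
# The load/solve workflow described in the class docstring, using a
# hypothetical input file and the CPU value-iteration path so that no GPU
# build of the 'nova' library is required.
if __name__ == "__main__":
    mdp = MDP()
    mdp.load("domain.mdp", filetype='cassandra')  # hypothetical file name
    policy, timing = mdp.solve(algorithm='vi', process='cpu')
    print(policy)
    print("wall time: %.3f s, cpu time: %.3f s" % timing)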
|
StarcoderdataPython
|
3321690
|
<reponame>AustinSmith29/basketball_reference_web_scraper<gh_stars>1-10
from unittest import TestCase, mock
from basketball_reference_web_scraper.data import OutputWriteOption
from basketball_reference_web_scraper.writers import CSVWriter, WriteOptions
class TestCSVWriter(TestCase):
DATA = ["some", "row", "data"]
COLUMN_NAMES = ["some", "column", "names"]
@mock.patch("basketball_reference_web_scraper.writers.csv.DictWriter")
def test_opens_correct_file(self, mock_csv_dict_writer):
with mock.patch("builtins.open", mock.mock_open()) as mock_file:
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
row_formatter = mock.Mock(format=mock.Mock())
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
writer = CSVWriter(column_names=self.COLUMN_NAMES, row_formatter=row_formatter)
writer.write(
data=self.DATA,
options=WriteOptions(
file_path="some file path",
mode=OutputWriteOption.WRITE
)
)
mock_file.assert_called_with("some file path", OutputWriteOption.WRITE.value, newline="", encoding="utf8")
@mock.patch("basketball_reference_web_scraper.writers.csv.DictWriter")
def test_file_and_columns_are_used_by_writer(self, mock_csv_dict_writer):
with mock.patch("builtins.open", mock.mock_open()) as mock_file:
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
row_formatter = mock.Mock(format=mock.Mock())
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
writer = CSVWriter(column_names=self.COLUMN_NAMES, row_formatter=row_formatter)
writer.write(
data=self.DATA,
options=WriteOptions(
file_path="some file path",
mode=OutputWriteOption.WRITE
)
)
mock_csv_dict_writer.assert_called_with(mock_file(), fieldnames=self.COLUMN_NAMES)
@mock.patch("basketball_reference_web_scraper.writers.csv.DictWriter")
def test_header_is_written(self, mock_csv_dict_writer):
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
row_formatter = mock.Mock(format=mock.Mock())
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
writer = CSVWriter(column_names=self.COLUMN_NAMES, row_formatter=row_formatter)
writer.write(
data=self.DATA,
options=WriteOptions(
file_path="some file path",
mode=OutputWriteOption.WRITE
)
)
csv_dict_writer.writeheader.assert_called_once_with()
@mock.patch("basketball_reference_web_scraper.writers.csv.DictWriter")
def test_rows_are_written(self, mock_csv_dict_writer):
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
row_formatter = mock.Mock(format=mock.Mock())
csv_dict_writer = mock.Mock(writeheader=mock.Mock(), writerows=mock.Mock())
mock_csv_dict_writer.return_value = csv_dict_writer
writer = CSVWriter(column_names=self.COLUMN_NAMES, row_formatter=row_formatter)
writer.write(
data=self.DATA,
options=WriteOptions(
file_path="some file path",
mode=OutputWriteOption.WRITE
)
)
csv_dict_writer.writerows.assert_called_once_with(mock.ANY)
self.assertEqual(3, row_formatter.format.call_count)
row_formatter.format.assert_has_calls(
calls=[
mock.call("some"),
mock.call("row"),
mock.call("data"),
],
any_order=False
)
|
StarcoderdataPython
|
6444046
|
<filename>tests/__init__.py
""" unit test """
import difflib
import inspect
import json
import logging
import os
import sys
import tempfile
from io import StringIO
from logging import Handler
from random import random
from unittest.case import TestCase
from bzt.cli import CLI
from bzt.engine import SelfDiagnosable
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.six import u
from bzt.utils import run_once, EXE_SUFFIX, get_full_path
TestCase.shortDescription = lambda self: None # suppress nose habit to show docstring instead of method name
@run_once
def setup_test_logging():
""" set up test logging for convenience in IDE """
root = logging.getLogger('')
if not root.handlers:
CLI.log = None
CLI.verbose = True
CLI.setup_logging(CLI)
else:
root.debug("Already set up logging")
setup_test_logging()
logging.info("Bootstrapped test")
def __dir__():
filename = inspect.getouterframes(inspect.currentframe())[1][1]
return os.path.dirname(filename)
# execute tests regardless of working directory
root_dir = __dir__() + '/../'
os.chdir(root_dir)
RESOURCES_DIR = os.path.join(__dir__(), 'resources') + os.path.sep
BUILD_DIR = __dir__() + "/../build/tmp/"
TEST_DIR = __dir__() + "/../build/test/"
BASE_CONFIG = __dir__() + "/../bzt/resources/base-config.yml"
def r(mul=5):
return 1 + int(mul * random()) / 1000.0
def rc():
return "%s00" % (int(4 * random()) + 1)
def err():
if int(50 * random()) == 0:
return "Some Error"
else:
return None
def random_sample(ts, label='', conc=1):
return ts, label, conc, r(), r(), r(), rc(), err()
def random_datapoint(n):
point = DataPoint(n)
overall = point[DataPoint.CURRENT].setdefault('', KPISet())
overall[KPISet.CONCURRENCY] = r(100)
overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000)) + 1
overall[KPISet.SUCCESSES] = int(overall[KPISet.SAMPLE_COUNT] * random())
overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] - overall[KPISet.SUCCESSES]
overall[KPISet.PERCENTILES]['25.0'] = r(10)
overall[KPISet.PERCENTILES]['50.0'] = r(20)
overall[KPISet.PERCENTILES]['75.0'] = r(30)
overall[KPISet.PERCENTILES]['90.0'] = r(40)
overall[KPISet.PERCENTILES]['99.0'] = r(50)
overall[KPISet.PERCENTILES]['100.0'] = r(100)
overall[KPISet.RESP_CODES][rc()] = 1
overall[KPISet.AVG_RESP_TIME] = r(100)
overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
overall.sum_rt = overall[KPISet.AVG_RESP_TIME] * overall[KPISet.SAMPLE_COUNT]
overall.sum_cn = overall[KPISet.AVG_CONN_TIME] * overall[KPISet.SAMPLE_COUNT]
overall.sum_lt = overall[KPISet.AVG_LATENCY] * overall[KPISet.SAMPLE_COUNT]
cumul = point[DataPoint.CUMULATIVE].setdefault('', KPISet())
cumul.merge_kpis(overall)
cumul.recalculate()
point.recalculate()
overall[KPISet.AVG_RESP_TIME] = r(100)
overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
return point
def close_reader_file(obj):
if obj and obj.file and obj.file.fds:
obj.file.fds.close()
class BZTestCase(TestCase):
def setUp(self):
self.captured_logger = None
self.log_recorder = None
self.func_args = []
self.func_results = None
def func_mock(self, *args, **kwargs):
self.func_args.append({'args': args, 'kargs': kwargs})
if isinstance(self.func_results, list):
return self.func_results.pop(0)
else:
return self.func_results
def sniff_log(self, log):
self.log_recorder = RecordingHandler()
self.captured_logger = log
self.captured_logger.addHandler(self.log_recorder)
def tearDown(self):
exc, _, _ = sys.exc_info()
if exc:
try:
if hasattr(self, 'obj') and isinstance(self.obj, SelfDiagnosable):
diags = self.obj.get_error_diagnostics()
if diags:
for line in diags:
logging.info(line)
except BaseException:
pass
if self.captured_logger:
self.captured_logger.removeHandler(self.log_recorder)
self.log_recorder.close()
def assertFilesEqual(self, expected, actual, replace_str="", replace_with=""):
# import shutil; shutil.copy(actual, expected)
with open(expected) as exp, open(actual) as act:
act_lines = [x.replace(replace_str, replace_with).rstrip() for x in act.readlines()]
exp_lines = [x.replace(replace_str, replace_with).rstrip() for x in exp.readlines()]
diff = list(difflib.unified_diff(exp_lines, act_lines))
if diff:
logging.info("Replacements are: %s => %s", replace_str, replace_with)
msg = "Failed asserting that two files are equal:\n" + actual + "\nversus\n" + expected + "\nDiff is:\n"
raise AssertionError(msg + "\n".join(diff))
def assertPathsEqual(self, p1, p2):
if not isinstance(p1, list):
p1 = [p1]
if not isinstance(p2, list):
p2 = [p2]
for num in range(len(p1)):
self.assertEqual(get_full_path(p1[num]), get_full_path(p2[num]))
def local_paths_config():
""" to fix relative paths """
dirname = os.path.dirname(__file__)
fds, fname = tempfile.mkstemp()
os.close(fds)
settings = {
"modules": {
"jmeter": {
"path": RESOURCES_DIR + "jmeter/jmeter-loader" + EXE_SUFFIX,
},
"grinder": {
"path": RESOURCES_DIR + "grinder/fake_grinder.jar",
},
"gatling": {
"path": RESOURCES_DIR + "gatling/gatling" + EXE_SUFFIX,
},
"junit": {
"path": dirname + "/../build/selenium/tools/junit/junit.jar",
"selenium-server": dirname + "/../build/selenium/selenium-server.jar"
}
}
}
jstring = json.dumps(settings)
with open(fname, 'w') as fds:
fds.write(jstring)
return fname
class RecordingHandler(Handler):
def __init__(self):
super(RecordingHandler, self).__init__()
self.info_buff = StringIO()
self.err_buff = StringIO()
self.debug_buff = StringIO()
self.warn_buff = StringIO()
def emit(self, record):
"""
:type record: logging.LogRecord
:return:
"""
if record.levelno == logging.INFO:
self.write_log(self.info_buff, record.msg, record.args)
elif record.levelno == logging.ERROR:
self.write_log(self.err_buff, record.msg, record.args)
elif record.levelno == logging.WARNING:
self.write_log(self.warn_buff, record.msg, record.args)
elif record.levelno == logging.DEBUG:
self.write_log(self.debug_buff, record.msg, record.args)
def write_log(self, buff, str_template, args):
str_template += "\n"
if args:
buff.write(u(str_template % args))
else:
buff.write(u(str_template))
|
StarcoderdataPython
|
1754237
|
<filename>tests/kyu_8_tests/test_smallest_unused_id.py
import unittest
from katas.kyu_8.smallest_unused_id import next_id
class NextIDTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(next_id([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 11)
def test_equals_2(self):
self.assertEqual(next_id([5, 4, 3, 2, 1]), 0)
def test_equals_3(self):
self.assertEqual(next_id([0, 1, 2, 3, 5]), 4)
def test_equals_4(self):
self.assertEqual(next_id([0, 0, 0, 0, 0, 0]), 1)
def test_equals_5(self):
self.assertEqual(next_id([]), 0)
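# --- Reference sketch (added for illustration; not the kata's own solution) ---
# One implementation consistent with the expected values above: return the
# smallest non-negative integer that is not already used as an id.
def _reference_next_id(ids):
    used = set(ids)
    candidate = 0
    while candidate in used:
        candidate += 1
    return candidate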
|
StarcoderdataPython
|
3280441
|
<filename>django_admin_json_editor/admin.py
import json
from django import forms
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
class JSONEditorWidget(forms.Widget):
template_name = 'django_admin_json_editor/editor.html'
def __init__(self, schema, collapsed=True, sceditor=False, editor_options=None, attrs=None):
super(JSONEditorWidget, self).__init__()
self._schema = schema
self._collapsed = collapsed
self._sceditor = sceditor
self._editor_options = editor_options or {}
self.attrs = attrs or {}
def render(self, name, value, renderer=None):
if callable(self._schema):
schema = self._schema(self)
else:
schema = self._schema
schema['title'] = ' '
schema['options'] = {'collapsed': int(self._collapsed)}
editor_options = {
'theme': 'bootstrap3',
'iconlib': 'fontawesome4',
'schema': schema,
}
editor_options.update(self._editor_options)
context = {
'name': name,
'data': value,
'sceditor': int(self._sceditor),
'editor_options': json.dumps(editor_options),
'attrs': self.attrs
}
return mark_safe(render_to_string(self.template_name, context))
@property
def media(self):
css = {
'all': [
'django_admin_json_editor/bootstrap/css/bootstrap.min.css',
'django_admin_json_editor/fontawesome/css/font-awesome.min.css',
'django_admin_json_editor/style.css',
]
}
js = [
'django_admin_json_editor/jquery/jquery.min.js',
'django_admin_json_editor/bootstrap/js/bootstrap.min.js',
'django_admin_json_editor/jsoneditor/jsoneditor.min.js',
]
if self._sceditor:
css['all'].append(
'django_admin_json_editor/sceditor/themes/default.min.css')
js.append(
'django_admin_json_editor/sceditor/jquery.sceditor.bbcode.min.js')
return forms.Media(css=css, js=js)
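# --- Usage sketch (added for illustration, not part of the original module) ---
# The typical way this widget is wired into the Django admin: a ModelForm
# swaps the widget of a JSON field for JSONEditorWidget together with a
# JSON-Schema description. The model and field names below are hypothetical.
#
# DATA_SCHEMA = {
#     'type': 'object',
#     'title': 'Data',
#     'properties': {
#         'text': {'title': 'Some text', 'type': 'string', 'format': 'textarea'},
#         'status': {'title': 'Status', 'type': 'boolean'},
#     },
# }
#
# class JSONModelAdminForm(forms.ModelForm):
#     class Meta:
#         model = MyModel  # hypothetical model with a JSON field named "data"
#         fields = '__all__'
#         widgets = {
#             'data': JSONEditorWidget(DATA_SCHEMA, collapsed=False),
#         }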
|
StarcoderdataPython
|
3398627
|
import os
#path = "/usr/src/app/data"
#path = "./data"
#os.chdir(path)
path = os.path.dirname(os.path.realpath('__file__'))
dirPath = path + "/data/"
def handleFile(filePath):
print(__file__)
with open(filePath, 'r') as f:
print(f.read())
# For all files
for file in os.listdir(dirPath):
if file.endswith(".txt"):
filePath = dirPath + file
handleFile(filePath)
|
StarcoderdataPython
|
3222448
|
"""Data object helpers."""
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Any, Dict, List
# pylint: disable=inconsistent-return-statements
# List/Dict objects contain only known types
def set_default(obj):
"""Make ListObject/DictObject into list/dict for serialization."""
if isinstance(obj, ListObject):
# pylint: disable=W0212
return obj._data
if isinstance(obj, DictObject):
# pylint: disable=W0212
return obj._data
class ListObject:
"""Represent a list with property access to sub-dict/list objects."""
# pylint: disable=unused-variable
def __init__(self, data: list):
"""Initialize."""
self._data = [] # type: List[Any]
for pos, item in enumerate(data):
if isinstance(item, dict):
self._data.append(DictObject(item))
elif isinstance(item, list):
self._data.append(ListObject(item))
else:
self._data.append(item)
def __iter__(self):
"""Iterator."""
for item in self._data:
yield item
def __getitem__(self, idx):
"""Iterator."""
if idx < 0 or idx >= len(self._data):
raise KeyError
return self._data[idx]
def __len__(self):
"""Return number of items."""
return len(self._data)
def get(self, idx):
"""Return item based on index."""
return self._data[idx]
def __str__(self):
"""Return string (json) representing the object."""
return self.json
@property
def data(self):
"""Return data object."""
return self._data
@property
def json(self) -> str:
"""Return a well-formated json string."""
return json.dumps(self._data, sort_keys=True, indent=4, default=set_default)
class DictObject:
"""Represent a dict with property access to sub-dict/list objects."""
def __init__(self, data: dict):
"""Initialize."""
self._data = {} # type: Dict[Any, Any]
for key in data:
if isinstance(data[key], dict):
self._data[key] = DictObject(data[key])
elif isinstance(data[key], list):
self._data[key] = ListObject(data[key])
else:
self._data[key] = data[key]
def __getattr__(self, name):
"""Allow property name access to data."""
if name in self._data:
return self._data[name]
return object.__getattribute__(self, name)
def __iter__(self):
"""Iterator."""
for item in self._data:
yield item
def __getitem__(self, key):
"""Iterator."""
if key in self._data:
return self._data[key]
raise KeyError
def __len__(self):
"""Return number of items."""
return len(self._data)
def get(self, key):
"""Return key."""
return self._data.get(key)
def __str__(self):
"""Return string (json) representing the object."""
return self.json
@property
def data(self):
"""Return data object."""
return self._data
@property
def json(self) -> str:
"""Return a well-formated json string."""
return json.dumps(self._data, sort_keys=True, indent=4, default=set_default)
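# --- Usage sketch (added for illustration, not part of the original module) ---
# A small, self-contained demonstration of the property-style access the two
# helpers provide; the sample data is made up.
if __name__ == "__main__":
    config = DictObject({
        "server": {"host": "localhost", "port": 8080},
        "tags": ["alpha", "beta"],
    })
    print(config.server.host)        # nested dicts become DictObject -> "localhost"
    print(config["server"]["port"])  # item access still works -> 8080
    print(len(config.tags))          # nested lists become ListObject -> 2
    print(config.json)               # pretty-printed JSON via set_default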
|
StarcoderdataPython
|
8136223
|
<filename>tests/poc/test_pipeline.py<gh_stars>1-10
import os
import unittest
import pandas as pd
from evaluator.models.document import Document
from evaluator.models.standard_item import StandardItem
from evaluator.models.standard_item_keyword import StandardItemKeyword
from evaluator.models.disclosure_score import DisclosureScore
from evaluator.models.normalized_result import NormalizedResult
from evaluator.normalizer.keyword_normalizer import KeywordNormalizer
class TestPipeline(unittest.TestCase):
def test_pipeline(self):
path = os.path.join(os.path.dirname(__file__), "../_data/evaluation_test.xlsx")
documents = self.to_documents(pd.read_excel(path, sheet_name="documents"))
standard_items = self.to_standard_items(pd.read_excel(path, sheet_name="standards"))
normalized_result_df = pd.read_excel(path, sheet_name="normalized_result")
normalizer = KeywordNormalizer(documents)
ns = []
ds = []
for s in standard_items:
normalized_results, disclosure_score = normalizer.normalize(
company_id=9999,
fiscal_year=9999,
standard_item=s)
if len(normalized_results) > 0:
ns += normalized_results
if disclosure_score is not None:
ds.append(disclosure_score)
hit_count = normalized_result_df[
normalized_result_df["theme"] == s.theme]["hit_count"].values[0]
self.assertEqual(hit_count, disclosure_score.hit_count)
n_df = NormalizedResult.to_tables(ns)
d_df = DisclosureScore.to_tables(ds)
def to_documents(self, documents_df: pd.DataFrame) -> [dict]:
documents = []
for i, row in documents_df.iterrows():
sections = row["section"].split("\n") if isinstance(row["section"], str) else []
d = Document(
document_id=i,
resource_id=i,
company_id=9999,
fiscal_year=9999,
body=row["text"],
lang="ja",
head=row["chapter"],
sections=sections
)
documents.append(d)
return documents
def to_standard_items(self, standard_df: pd.DataFrame, lang="ja") -> [dict]:
standard_items = []
column = "keywords"
if lang == "ja":
column = "keywords_ja"
for i, row in standard_df.iterrows():
s = StandardItem(
standard_item_id=i,
subject=row["subject"],
theme=row["theme"],
keywords=[]
)
keywords = []
ks = row[column].split("\n")
for j, k in enumerate(ks):
q = StandardItemKeyword(
standard_item_keyword_id=i * 100 + j,
keyword=k,
query=k
)
keywords.append(q)
s.keywords = keywords
standard_items.append(s)
return standard_items
|
StarcoderdataPython
|
1915704
|
<gh_stars>0
import json
f = open("../../config/executer.txt")
processors = []
action_map = {}
actions = []
primitive_list = []
primitive_num = 0
primitive_idx = 0
cur_idx = 0
while True:
line = f.readline()
print(line)
if not line:
break
if line == "\n":
continue
l = line.split()
if l[0] == 'e':
processors.append(l[1])
action_map[processors[-1]] = []
# action_map[processors[-1]] = {}
if l[0] == 'a':
actions.append(l[1])
action_map[processors[-1]].append({})
action_map[processors[-1]][-1]["action_name"] = l[1]
action_map[processors[-1]][-1]["parameter_num"] = int(l[2])
action_map[processors[-1]][-1]["primitives"] = []
# action_map[processors[-1]][actions[-1]] = {}
#
# action_map[processors[-1]][actions[-1]]["action_name"] = l[1]
# action_map[processors[-1]][actions[-1]]["parameter_num"] = int(l[2])
# action_map[tables[-1]][actions[-1]]["primitives"] = {}
primitive_num = int(l[3])
# primitive_list = []
cur_idx = 0
elif l[0] == 'p':
primitive_map = {}
primitive_name = l[1]
primitive_map["primitive_name"] = l[1]
parameters = []
for i in range(len(l)-2):
header_name = l[2+i].split('.')[0]
field_name = l[2+i].split('.')[1]
parameter_map = {}
parameter_map["type"] = header_name
parameter_map["value"] = field_name
parameters.append(parameter_map)
primitive_map["parameters"] = parameters
action_map[processors[-1]][-1]["primitives"].append(primitive_map)
# primitive_list.append(primitive_map)
# primitive_idx += 1
# if primitive_idx == primitive_num:
# action_map[processors[-1]][actions[-1]]["primitives"] = primitive_list
# primitive_list = []
# primitive_idx = 0
# idx = "primitive"+str(cur_idx)
# cur_idx += 1
# action_map[tables[-1]][actions[-1]]["primitives"][idx] = {}
# action_map[tables[-1]][actions[-1]]["primitives"][idx]["primitive_name"] = primitive_name
# action_map[tables[-1]][actions[-1]]["primitives"][idx]["parameters"] = {}
# for i in range(len(l)-2):
# primitive_parameter_map = {}
# header_name = l[2+i].split('.')[0]
# field_name = l[2+i].split('.')[1]
# primitive_parameter_map["type"] = header_name
# primitive_parameter_map["value"] = field_name
#
# action_map[tables[-1]][actions[-1]]["primitives"][idx]["parameters"]["parameter"+str(i)] = primitive_parameter_map
# action_map[tables[-1]][actions[-1]]["primitives"][idx][] = {}
# action_map[tables[-1]][actions[-1]]["primitives"][idx]
# action_map[tables[-1]][actions[-1]]["primitives"]["primitive"+str(i)][l[2+i]] = primitive_parameter_map
f.close()
print(action_map)
print(json.dumps(action_map, indent=3))
filename = "../../config/executer.json"
with open(filename, 'w') as file_obj:
json.dump(action_map, file_obj, indent=3)
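# --- Format note (added for illustration, not part of the original script) ---
# The expected layout of config/executer.txt, reconstructed from the parsing
# above (the concrete names below are made up):
#
#   e <executer/processor name>
#   a <action name> <parameter count> <primitive count>
#   p <primitive name> <header.field> [<header.field> ...]
#
# e.g.
#   e ingress_processor
#   a set_nhop 2 1
#   p modify_field ipv4.dstAddr ipv4.ttl
#
# which would serialize to JSON roughly as:
#   {"ingress_processor": [{"action_name": "set_nhop", "parameter_num": 2,
#     "primitives": [{"primitive_name": "modify_field", "parameters":
#       [{"type": "ipv4", "value": "dstAddr"}, {"type": "ipv4", "value": "ttl"}]}]}]}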
|
StarcoderdataPython
|
3459668
|
import numpy as np
import random, json
import nltk_utils
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_words, tokeniser, stem
import model
from model import Network
with open('intents.json','r') as f:
intents=json.load(f)
Words=[]
tags=[]
xy=[]
for intent in intents['intents']:
tag=intent['tag']
tags.append(tag)
for pattern in intent['patterns']:
w=tokeniser(pattern)
Words.extend(w)
xy.append((w,tag))
words_to_ignore=['?',".",'!']
Words=[stem(w) for w in Words if w not in words_to_ignore]
# Sorting all words, removing duplicates
Words=sorted(set(Words))
tags= sorted(set(tags))
# Creating training dataset
X_train=[]
Y_train=[]
for (pattern_sentence, tag) in xy:
#X: Bag of words for each pattern sentence
bag= bag_words(pattern_sentence, Words)
X_train.append(bag)
#Y: Class labels
label=tags.index(tag)
Y_train.append(label)
X_train=np.array(X_train)
Y_train=np.array(Y_train)
# Specifying hyperparameters
epochs=50
batch_size=8
lr=0.0001
inputs=len(X_train[0])
hidden=8
outputs= len(tags)
class ChatData(Dataset):
def __init__(self):
self.n_samples= len(X_train)
self.x_data= X_train
self.y_data= Y_train
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.n_samples
chat_data=ChatData()
train_loader= DataLoader(dataset=chat_data, batch_size=batch_size, shuffle=True, num_workers=2)
device= torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model= Network(inputs, hidden, outputs).to(device)
criterion=nn.CrossEntropyLoss()
optimiser=torch.optim.Adam(model.parameters(), lr=lr)
for epoch in range(epochs):
for(words, labels) in train_loader:
words= words.to(device)
labels=labels.to(device)
#forward pass
outputs= model(words)
loss=criterion(outputs, labels)
#backprop and optimisation
optimiser.zero_grad()
loss.backward()
optimiser.step()
if(epoch+1)%100==0:
print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
models={
"model_state": model.state_dict(),
"inputs": inputs,
"hidden": hidden,
"outputs": outputs,
"tags": tags
}
File="models.pth"
torch.save(models, File)
print(f'training complete. Model saved to {File}')
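# --- Sketch of the imported Network class (illustration only) ---
# model.py is not shown in this snippet; the definition below is an assumed
# shape that is consistent with how Network is used above: constructed as
# Network(inputs, hidden, outputs) and returning raw class scores that
# nn.CrossEntropyLoss can consume.
#
# class Network(nn.Module):
#     def __init__(self, input_size, hidden_size, num_classes):
#         super(Network, self).__init__()
#         self.l1 = nn.Linear(input_size, hidden_size)
#         self.l2 = nn.Linear(hidden_size, hidden_size)
#         self.l3 = nn.Linear(hidden_size, num_classes)
#         self.relu = nn.ReLU()
#
#     def forward(self, x):
#         out = self.relu(self.l1(x))
#         out = self.relu(self.l2(out))
#         return self.l3(out)  # logits; CrossEntropyLoss applies log-softmax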
|
StarcoderdataPython
|
1921325
|
from collections import namedtuple
class GoldenFile:
"""Record the correct running results in a file for comparing with following test results."""
def __init__(self, filename):
self.filename = filename
def make(self, text):
with open(self.filename, 'w+') as f:
f.write(text)
def read(self):
with open(self.filename) as f:
s = f.read()
return s
def namedlist(names, *args):
"""
Convert a list of unnamed tuples into a list of named tuples, provided as a generator.
Usage example:
for t in namedlist('name password',
('user1', '<PASSWORD>'),
('user2', '<PASSWORD>')):
assert login(t.name, t.password)
:param names: space-separated tuple field names, e.g. "field1 field2 field3"
:param args: each argument is a tuple, e.g. (1, 2, 3)
:return: an iterator
"""
T = namedtuple('T', names)
for x in args:
yield T(*x)
|
StarcoderdataPython
|
1628400
|
<gh_stars>0
"""
10 - Write a function that receives two numbers and returns the larger one.
"""
n1 = int(input('Enter the first number: '))
n2 = int(input('Enter the second number: '))
def maior(n1, n2):
if n1 > n2:
return f'The larger number is: {n1}'
return f'The larger number is: {n2}'
print(maior(n1, n2))
|
StarcoderdataPython
|
8165707
|
<gh_stars>0
import dsfs.clustering as under_test
leaf1 = under_test.Leaf([10, 20])
leaf2 = under_test.Leaf([30, -15])
merged = under_test.Merged((leaf1, leaf2), order=1)
def test_num_differences():
assert under_test.num_differences([1, 2, 3], [2, 1, 3]) == 2
assert under_test.num_differences([1, 2], [1, 2]) == 0
def test_get_values():
assert under_test.get_values(merged) == [[10, 20], [30, -15]]
|
StarcoderdataPython
|
1705095
|
# example to understand variables
a = [2, 4, 6]
b = a
a.append(8)
print(b)
# example to understand variable scope
a = 10
b = 20
def my_function():
global a
a = 11
b = 21
my_function()
print(a)  # prints 11 (my_function rebinds the global a)
print(b)  # prints 20 (b inside my_function is local)
# example to understand the conditions
x = "one"
if x == 0:
print("False")
elif x == 1:
print("True")
else:
print("Something else")
# prints 'Something else'
# example to understand looping
words = ["cat", "dog", "elephant"]
for w in words:
print(w)
|
StarcoderdataPython
|
256228
|
<filename>module/user/get_msg_from_db.py<gh_stars>1-10
# !/uer/bin/env python3
# coding=utf-8
import re
from base.db_manager import mysql
from base.logger import logged, LOGGER
@logged
def get_msg_from_db(phone) -> int:
with mysql() as cur:
cur.execute('select msg from czb_message.sms_log where mobile=%s group by send_time DESC limit 1', (phone,))
result = cur.fetchall()
re_r = re.compile("\d{4}", re.S)
try:
code = re.findall(re_r, result[0].get('msg'))
if len(code):
return code[0]
except TypeError as e:
LOGGER.error("运行失败,查询结果不能正常匹配!异常:{}".format(e))
return False
|
StarcoderdataPython
|
9691957
|
import abc
import tcod
class Console(abc.ABC):
def __init__(self, x: int, y: int,
width: int, height: int) -> None:
self.x = x
self.y = y
self.width = width
self.height = height
self.console = tcod.console_new(self.width, self.height)
@abc.abstractmethod
def render(self) -> None:
pass
def blit(self, target: tcod.console.Console) -> None:
self.console.blit(
0, 0, self.width, self.height,
target, self.x, self.y)
def clear(self) -> None:
tcod.console_clear(self.console)
|
StarcoderdataPython
|
1758516
|
<reponame>peshmerge/managing_big_data_practicals<gh_stars>0
"""
This computes the inverted index for a document base in /data/doina/Gutenberg-EBooks.
This program is written in Python2
To execute on a machine:
time spark-submit IINDEX-s2801620-s2449471-MKMPM.py 2> /dev/null
"""
from pyspark import SparkContext
# fetch spark context and set log level to ERROR only
sc = SparkContext(appName="IINDEX-MKMPM")
sc.setLogLevel("ERROR")
# read all input files
rdd = sc.wholeTextFiles("/data/doina/Gutenberg-EBooks")
# using flatMapValues to map file, contents of RDD to file, set(unique words)
words_iindex = rdd.flatMapValues(lambda contents: set(contents.lower().split()))
# swapping (file, word) to (word, set(file)) because of inverse index
words_iindex = words_iindex.map(lambda (file, word): (word,{file}))
# reducing by key to get desired result
words_iindex = words_iindex.reduceByKey(lambda a,b:a|b)
# filter and process all the above steps
words_iindex_collected = words_iindex.filter(lambda item: len(item[1]) > 12).collect()
# print the words which occur in at least 13 docs
for (word, docs) in words_iindex_collected:
print word,
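# --- Worked toy example (added for illustration) ---
# With two hypothetical documents the pipeline above behaves as follows:
#   wholeTextFiles  -> [("doc1", "the cat"), ("doc2", "the dog")]
#   flatMapValues   -> [("doc1", "the"), ("doc1", "cat"),
#                       ("doc2", "the"), ("doc2", "dog")]
#   map (swap)      -> [("the", {"doc1"}), ("cat", {"doc1"}),
#                       ("the", {"doc2"}), ("dog", {"doc2"})]
#   reduceByKey (|) -> [("the", {"doc1", "doc2"}), ("cat", {"doc1"}), ("dog", {"doc2"})]
# The final filter keeps only words appearing in more than 12 documents.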
|
StarcoderdataPython
|
11211366
|
import data
from pyoram import utils
FILE_NAME = 'data%d.oram'
class Stash:
def __init__(self):
if not data.is_folder(utils.STASH_FOLDER_NAME):
data.create_folder(utils.STASH_FOLDER_NAME)
def get_filename(self, data_id):
return FILE_NAME % data_id
def add_file(self, data_id, main_part):
with data.open_data_file_in_stash(self.get_filename(data_id), utils.WRITE_BINARY_MODE) as data_item:
data_item.write(main_part)
def open_file(self, data_id):
with data.open_data_file_in_stash(self.get_filename(data_id), utils.READ_BINARY_MODE) as data_item:
data_block = data_item.read()
return data_block
def delete_data_items(self, data_ids):
for data_id in data_ids:
self.delete_data_item(data_id)
def delete_data_item(self, data_id):
data.delete_file_in_stash(self.get_filename(data_id))
def get_data_item(self, data_id):
if data.is_file_in_stash(self.get_filename(data_id)):
with data.open_data_file_in_stash(self.get_filename(data_id), utils.READ_BINARY_MODE) as data_item:
return data_id, data_item.read()
def get_potential_data_id(self):
data_ids = []
stash_file_names = data.get_file_names_from_stash()
for file_name in stash_file_names:
data_ids.append(int(file_name[4:-5]))
return data_ids
def get_stash_size(self):
return data.get_stash_size()
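# --- Usage sketch (added for illustration, not part of the original module) ---
# The intended round trip through the stash folder; the data id and payload
# are made up, and the pyoram `data` helpers above must be importable.
if __name__ == '__main__':
    stash = Stash()
    stash.add_file(7, b'encrypted-block-bytes')
    print(stash.get_data_item(7))         # -> (7, b'encrypted-block-bytes')
    print(stash.get_potential_data_id())  # ids parsed from data<N>.oram file names
    stash.delete_data_item(7)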
|
StarcoderdataPython
|
236608
|
<reponame>tristansgray/simian
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
"""Top level __init__ for admin package."""
import collections
import datetime
import logging
import os
import re
import urllib
import webapp2
from google.appengine.ext.webapp import template
from simian import settings
from simian.mac.admin import xsrf
from simian.mac.common import auth
QUERY_LIMITS = [25, 50, 100, 250, 500, 1000, 2000]
DEFAULT_COMPUTER_FETCH_LIMIT = 25
class Error(Exception):
"""Base Error."""
def GetMenu():
"""Returns an OrderedDict with menu contents."""
menu = collections.OrderedDict()
menu_items = [
{'type': 'summary', 'url': '/admin', 'name': 'Summary'},
{'type': 'search', 'url': 'javascript:simian.showSearch(); void(0);',
'name': 'Search'},
{'type': 'munki_packages', 'name': 'Munki Packages', 'subitems': [
{'type': 'packages', 'url': '/admin/packages',
'name': 'Package Admin'},
{'type': 'proposals', 'url': '/admin/proposals',
'name': 'Pending Proposals'},
{'type': 'package_logs', 'url': '/admin/packages/logs',
'name': 'Package Logs'},
{'type': 'proposal_logs', 'url': '/admin/proposals/logs',
'name': 'Proposal Logs'},
{'type': 'packages_historical',
'url': '/admin/packages?historical=1', 'name': 'Historical List'},
{'type': 'packages_installs', 'url': '/admin/installs',
'name': 'Installs'},
{'type': 'packages_failures',
'url': '/admin/installs?failures=1', 'name': 'Failures'},
{'type': 'packages_problems', 'url': '/admin/installproblems',
'name': 'Other Install Problems'}
]},
{'type': 'apple_updates', 'name': 'Apple Updates', 'subitems': [
{'type': 'apple_applesus', 'url': '/admin/applesus',
'name': 'Catalog Admin'},
{'type': 'apple_logs', 'url': '/admin/applesus/logs',
'name': 'Logs'},
{'type': 'apple_historical', 'url': '/admin/packages?applesus=1',
'name': 'Historical List'},
{'type': 'apple_installs', 'url': '/admin/installs?applesus=1',
'name': 'Installs'},
{'type': 'apple_failures',
'url': '/admin/installs?applesus=1&failures=1',
'name': 'Failures'}
]},
{'type': 'manifests', 'name': 'Manifests', 'subitems': [
{'type': 'manifests_admin', 'url': '/admin/manifest_modifications',
'name': 'Modification Admin'},
{'type': 'manifests_aliases', 'url': '/admin/package_alias',
'name': 'Package Aliases'},
{'type': 'manifest_stable', 'url': '/admin/manifest/stable',
'name': 'View Stable'},
{'type': 'manifest_testing', 'url': '/admin/manifest/testing',
'name': 'View Testing'},
{'type': 'manifest_unstable', 'url': '/admin/manifest/unstable',
'name': 'View Unstable'}
]},
{'type': 'admin_tools', 'name': 'Admin Tools', 'admin_only': True,
'subitems': [
{'type': 'acl_groups', 'url': '/admin/acl_groups',
'name': 'ACL Groups'},
{'type': 'config', 'url': '/admin/config',
'name': 'Configuration'},
{'type': 'ip_blacklist', 'url': '/admin/ip_blacklist',
'name': 'IP Blacklist'},
{'type': 'lock_admin', 'url': '/admin/lock_admin',
'name': 'Lock Admin'},
{'type': 'release_report', 'url': '/admin/release_report',
'name': 'Release Report'},
{'type': 'panic', 'url': '/admin/panic', 'name': 'Panic Mode'}
]},
{'type': 'tags', 'url': '/admin/tags', 'name': 'Tags'},
{'title': 'Client Reports'},
{'type': 'broken_clients', 'url': '/admin/brokenclients',
'name': 'Broken Clients'},
{'type': 'diskfree', 'url': '/admin/diskfree', 'name': 'Low Disk Space'},
{'type': 'uptime', 'url': '/admin/uptime', 'name': 'Long Uptime'},
{'type': 'offcorp', 'url': '/admin/offcorp', 'name': 'Longest Off Corp'},
{'type': 'msu_gui_logs', 'url': '/admin/msulogsummary',
'name': 'MSU GUI Logs'},
{'type': 'preflight_exits', 'url': '/admin/preflightexits',
'name': 'Preflight Exits'},
{'type': 'usersettings_knobs', 'url': '/admin/user_settings',
'name': 'UserSettings Knobs'}
]
for item in menu_items:
if 'type' in item:
if 'subitems' in item:
menu[item['type']] = {}
menu[item['type']]['name'] = item['name']
menu[item['type']]['subitems'] = collections.OrderedDict()
for subitem in item['subitems']:
menu[item['type']]['subitems'][subitem['type']] = subitem
else:
menu[item['type']] = item
elif 'title' in item:
menu[item['title']] = item
return menu
class AdminHandler(webapp2.RequestHandler):
"""Class for Admin UI request handlers."""
XSRF_PROTECT = False
def handle_exception(self, exception, debug_mode):
"""Handle an exception.
Args:
exception: exception that was thrown
debug_mode: True if the application is running in debug mode
"""
if issubclass(exception.__class__, auth.NotAuthenticated):
self.error(403)
return
else:
super(AdminHandler, self).handle_exception(exception, debug_mode)
def IsAdminUser(self):
"""Returns True if the current user is an admin, False otherwise."""
# NOTE(user): this is definitely not threadsafe.
if not hasattr(self, '_is_admin'):
self._is_admin = auth.IsAdminUser()
return self._is_admin
def Paginate(self, query, default_limit):
"""Returns a list of entities limited to limit, with a next_page cursor."""
try:
limit = int(self.request.get('limit', default_limit))
except ValueError:
limit = default_limit
if limit not in QUERY_LIMITS:
limit = default_limit
cursor = self.request.get('page', '')
if cursor:
query.with_cursor(cursor)
entities = list(query.fetch(limit))
if len(entities) == limit:
next_page = query.cursor()
else:
next_page = None
self._page = {
'limit': limit,
'next_page': next_page,
'results_count': len(entities),
}
return entities
def Render(self, template_path, values, write_to_response=True):
"""Renders a template using supplied data values and returns HTML.
Args:
template_path: str path of template.
values: dict of template values.
write_to_response: bool, True to write to response.out.write().
Returns:
str HTML of rendered template.
"""
path = os.path.join(
os.path.dirname(__file__), 'templates', template_path)
if not settings.DEV_APPSERVER:
values['static_path'] = 'myapp/%s' % os.getenv('CURRENT_VERSION_ID')
values['is_admin'] = self.IsAdminUser()
if not hasattr(self, '_menu'):
self._menu = GetMenu()
values['menu'] = self._menu
if not settings.APPROVAL_REQUIRED:
if 'proposals' in values['menu']['munki_packages']['subitems']:
del values['menu']['munki_packages']['subitems']['proposals']
if 'msg' not in values:
values['msg'] = self.request.GET.get('msg')
if 'report_type' not in values:
values['report_type'] = 'undefined_report'
if self.XSRF_PROTECT:
values['xsrf_token'] = xsrf.XsrfTokenGenerate(values['report_type'])
if hasattr(self, '_page'):
values['limit'] = self._page.get('limit')
values['next_page'] = self._page.get('next_page')
values['results_count'] = self._page.get('results_count')
values['limits'] = QUERY_LIMITS
values['request_query_params'] = self.request.GET
values['request_path'] = self.request.path
if self._page.get('next_page'):
# Generate next page link, replacing "page" query param with next_page.
query_params = self.request.GET.copy()
query_params['page'] = self._page.get('next_page')
values['next_page_link'] = '%s?%s' % (
self.request.path, urllib.urlencode(query_params, doseq=True))
html = template.render(path, values)
if write_to_response:
self.response.out.write(html)
return html
class UTCTZ(datetime.tzinfo):
"""tzinfo class for the UTC time zone."""
def tzname(self, unused_dt):
return 'UTC'
def dst(self, unused_dt):
return datetime.timedelta(0)
def utcoffset(self, unused_dt):
return datetime.timedelta(0)
def AddTimezoneToComputerDatetimes(computer):
"""Sets the tzinfo on all Computer.connected_datetimes for use with Django.
Args:
computer: models.Computer entity.
Returns:
Boolean. True if one date is today, false otherwise.
"""
for i in xrange(0, len(computer.connection_datetimes)):
cdt = computer.connection_datetimes[i]
# set timezone so Django "timesince" template filter works.
computer.connection_datetimes[i] = datetime.datetime(
cdt.year, cdt.month, cdt.day,
cdt.hour, cdt.minute, cdt.second,
tzinfo=UTCTZ())
def XmlToHtml(xml):
"""Convert an XML string into an HTML DOM with styles."""
tags = re.compile(r'\<(\/?)(\w*)([^<>]*)\>')
html = tags.sub((r'<span class="xml_tag \2"><\1<span class="xml_key">\2'
r'</span><span class="xml_attributes">\3</span>></span>'),
xml)
html = html.replace(' ', ' ').replace('\n', '<br/>')
return '<div class="xml">%s</div>' % html
|
StarcoderdataPython
|
3231494
|
<filename>foreign/apps.py
import json
from django.apps import AppConfig
from django.contrib.auth.models import User
from .models import CrossLabQuagentUserMap
class ForeignConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'foreign'
def ready(self):
# TODO: these dicts may later be replaced with database records
# load information of 16 laboratories, list of dicts
with open('static/data/labs.json', 'r') as f:
labs = json.load(f)
labs_id = [lab['id'] for lab in labs]
# TODO: temporarily take two records as user-lab-map
for i, lab in enumerate(labs): # only 13 labs can be used for now
if i >= 13:
break
username = 'u' + '{}'.format(i + 1).zfill(2)
print('--' * 20)
print(User.objects.get(username=username))
# if not CrossLabQuagentUserMap.objects.filter(user=User.objects.get(username=username)):
if not CrossLabQuagentUserMap.objects.filter(ilab_lab_id=lab['id'], ilab_lab_name=lab['name']):
CrossLabQuagentUserMap.objects.create(
# user=User.objects.get(username=User.objects.get(username=username))
user=User.objects.get(username=username),
ilab_lab_id=lab['id'],
ilab_lab_name=lab['name']
)
print('>>> created lab-user-map', i)
# query current equipments via iLab API
# necessary information of INQUIRE facility
# data type: dict
# facility = requests.get(core_url + '/{}/'.format(facility_id), headers=headers).json()['ilab_response']['cores']
# equipments = requests.get(ilab_urls['equipment'], headers=headers).json()['ilab_response']['equipment']
with open('static/data/facility.json', 'r') as f:
facility = json.load(f)
with open('static/data/equipments.json', 'r') as f:
equipments = json.load(f)
# TODO: currently, equipments contains five devices:
# polarized entangled photon signal, time-energy sync signal 1550, time-energy sync signal 1340, SNSPD, Draft equipment
# the original plan was: polarization-entangled source, polarization-entangled idle, polarization-entangled sync, time-energy sync 1550, time-energy sync 1340, SNSPD
equipment_id_to_types = {
487862: 'ep',
487863: 'ep',
487865: 'ep',
487864: 'spd'
}
|
StarcoderdataPython
|
64942
|
<gh_stars>0
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
from django.urls import reverse
import authentication.models as a
class PostCategory(models.Model):
name = models.CharField(max_length=255, blank=True, null=True)
description = models.CharField(max_length=255, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True, blank=True, editable=False)
created_at = models.DateTimeField(auto_now=True, null=True, blank=True, editable=False)
def __str__(self):
if self.name is None:
return 'Post name is empty'
return self.name
class Posts(models.Model):
user_id = models.ForeignKey(a.User, on_delete=models.CASCADE)
category = models.ForeignKey(PostCategory, on_delete=models.CASCADE)
content = models.TextField(blank=True, null=True)
image = models.ImageField(blank=True, null=True, upload_to='posts/')
status = models.CharField(max_length=255, blank=True, null=True)
like_count = models.IntegerField(default=0, editable=False)
comment_count = models.IntegerField(default=0, editable=False)
updated_at = models.DateTimeField(auto_now=True, null=True, blank=True, editable=False)
created_at = models.DateTimeField(auto_now=True, null=True, blank=True, editable=False)
def __str__(self):
return str(self.user_id)
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
class PostComments(models.Model):
user_id = models.ForeignKey(a.User, related_name='details', on_delete=models.CASCADE)
post_id = models.ForeignKey(Posts, related_name='details', on_delete=models.CASCADE)
content = models.TextField()
image = models.ImageField(blank=True, null=True, upload_to='posts/')
updated_at = models.DateTimeField(auto_now=True, null=True, blank=True, editable=False)
created_at = models.DateTimeField(auto_now=True, null=True, blank=True, editable=False)
class PostLikes(models.Model):
liker_id = models.ForeignKey(a.User, related_name='likes', on_delete=models.CASCADE)
post_id = models.ForeignKey(Posts, related_name='likes', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now=True, null=True, blank=True, editable=False)
|
StarcoderdataPython
|
9748717
|
import numpy as np
from seak import kernels
def test_single_column_kernel():
V = np.asarray([[0, 1, 2, ], [0, 1, 2], [0, 1, 2]])
G = np.asarray([[0, 1, 2], [2, 0, 1], [1, 1, 1], [0, 1, 2], [0, 0, 2]]) * 100.
i = 2
result = kernels.single_column_kernel(i, False)(G, V)
expected_result = np.asarray([[0, 200, 400],
[400, 0, 200],
[200, 200, 200],
[0, 200, 400],
[0, 0, 400]])
assert np.all(np.isclose(result, expected_result))
def test_diffscore_max_kernel():
V = np.asarray([[0, 1, 2, ], [0, 1, 2], [0, 1, 2]])
G = np.asarray([[0, 1, 2], [2, 0, 1], [1, 1, 1], [0, 1, 2], [0, 0, 2]]) * 100.
result = kernels.diffscore_max(G, V, False)
expected_result = np.asarray([[0, 200, 400],
[400, 0, 200],
[200, 200, 200],
[0, 200, 400],
[0, 0, 400]])
assert np.all(np.isclose(result, expected_result))
def test_linear_kernel():
V = np.asarray([[0, 1, 2, ], [0, 1, 2], [0, 1, 2]])
G = np.asarray([[0, 1, 2], [2, 0, 1], [1, 1, 1], [0, 1, 2], [0, 0, 2]])
result = kernels.linear(G, V)
assert np.all(np.isclose(result, G))
|
StarcoderdataPython
|
11272368
|
<reponame>Oscar-Oliveira/Python3
"""
Files
"""
import os
file = open(os.path.realpath(__file__), "r")
done = False
while True:
line = file.readline()
if len(line) == 0:
break
print(line, end="")
if not done:
print("FIRST LINE AGAIN")
file.seek(0)
done = True
file.close()
|
StarcoderdataPython
|
4912063
|
<gh_stars>0
"""
Libraries for calculating inter- and intramolecular interactions
"""
from automol.intmol._pot import lj_potential
from automol.intmol._pot import exp6_potential
from automol.intmol._pot import pairwise_potential_matrix
from automol.intmol._rep import low_repulsion_struct
__all__ = [
'lj_potential',
'exp6_potential',
'pairwise_potential_matrix',
'low_repulsion_struct'
]
|
StarcoderdataPython
|
9769777
|
<filename>example/tickettest/settings.py
# -*- coding: utf-8 -*-
from .settings_base import *
import platform
if platform.uname()[0] == 'Linux':
if 'ip-172-31-37-167.us-west-2.compute.internal' in platform.uname()[1]:
DOMAIN_URL = '172.16.31.10'
DEBUG = True
TIME_ZONE = 'UTC'
HTTPS_SUPPORT = not DEBUG
SESSION_COOKIE_DOMAIN = '172.16.31.10'
SESSION_COOKIE_SECURE = not DEBUG
if not DEBUG:
import mimetypes
mimetypes.add_type("image/png", ".png", True)
|
StarcoderdataPython
|
1754111
|
import os
import tvm
from tvm.contrib import cc, util
def test_add(target_dir):
n = tvm.var("n")
A = tvm.placeholder((n,), name='A')
B = tvm.placeholder((n,), name='B')
C = tvm.compute(A.shape, lambda i: A[i] + B[i], name="C")
s = tvm.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], "llvm", target_host="llvm", name="myadd")
fadd.save(os.path.join(target_dir, "add_cpu.o"))
cc.create_shared(os.path.join(target_dir, "add_cpu.so"),
[os.path.join(target_dir, "add_cpu.o")])
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
sys.exit(-1)
test_add(sys.argv[1])
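# Hedged usage sketch (not part of the original script): invoked with a target
# directory, e.g. `python this_script.py ./lib`, which builds the schedule and
# writes add_cpu.o and add_cpu.so into that directory. The script name is a
# placeholder assumption.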
|
StarcoderdataPython
|
112090
|
<gh_stars>0
#!/usr/bin/env python3
import socket
from multiprocessing.dummy import Pool
from itertools import repeat
from requests import get
from uuid import getnode
from icmplib import ping as ICMPLibPing
def portScan(localIP, port, threads = 10, timeout = 0.5):
localIP = '.'.join(localIP.split('.')[0:-1]) + '.'
threadPool = Pool(threads)
scanResults = threadPool.starmap(isUp, zip([localIP + str(i) for i in range(1, 256)], repeat(port), repeat(timeout)))
threadPool.close()
threadPool.join()
hosts = []
for i in range(0, len(scanResults)):
if scanResults[i]:
hosts.append(scanResults[i].split(':')[0])
return hosts
def getOpenPorts(IP, portRangeStop = 65535, portRangeStart = 1, threads = 10, timeout = 1.5):
threadPool = Pool(threads)
scanResults = threadPool.starmap(isUp, [(IP, i, timeout) for i in range(portRangeStart, portRangeStop + 1)])
threadPool.close()
threadPool.join()
hosts = []
for i in range(0, len(scanResults)):
if scanResults[i]:
hosts.append(int(scanResults[i].split(':')[1]))
return hosts
def getOpenPortsFromList(IP, portList, threads = 10, timeout = 1.5):
threadPool = Pool(threads)
scanResults = threadPool.starmap(isUp, [(IP, i, timeout) for i in portList])
threadPool.close()
threadPool.join()
hosts = []
for i in range(0, len(scanResults)):
if scanResults[i]:
hosts.append(int(scanResults[i].split(':')[1]))
return hosts
def getPrivateIP():
try:
tempSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tempSocket.connect(("8.8.8.8", 80))
IP = tempSocket.getsockname()[0]
tempSocket.close()
return IP
except:
return "127.0.0.1"
def getPublicIP():
try:
return get("https://api.ipify.org").text
except:
return "0.0.0.0"
def ping(IP, timeout = 0.5):
if ICMPLibPing(str(IP), 1, 1, float(timeout), privileged=False).is_alive:
return IP
else:
return False
def hostScan(localIP, threads = 10, timeout = 0.5):
localIP = '.'.join(localIP.split('.')[0:-1]) + '.'
threadPool = Pool(threads)
scanResults = threadPool.starmap(ping, zip([localIP + str(i) for i in range(1, 256)], repeat(timeout)))
threadPool.close()
threadPool.join()
hosts = []
for i in range(0, len(scanResults)):
if scanResults[i]:
hosts.append(scanResults[i])
return hosts
def isUp(IP, port, timeout = 0.5):
try:
tempSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempSocket.settimeout(timeout)
tempSocket.connect((str(IP), int(port)))
tempSocket.settimeout(None)
tempSocket.close()
return IP + ':' + str(port)
except:
return False
def getMACAddress():
try:
return ':'.join(hex(getnode()).replace("0x", "").upper()[i : i + 2] for i in range(0, 11, 2))
except:
return "00:00:00:00:00:00"
def getHostName():
try:
return socket.gethostname()
except:
return ""
|
StarcoderdataPython
|
261855
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import shutil
import subprocess
import tempfile
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
import edit_cuberite_config
from base_log_reader import BaseLogReader
PLUGIN_NAME = "recover_initial"
def recover_initial_blockmap(old_workdir):
"""Given a logdir containing a logging.bin, regenerate the initial blockmap
and return the directory with the region (.mca) files.
"""
workdir = tempfile.mkdtemp()
print("Workdir:", workdir, flush=True)
repo_home = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../")
# Copy files from old workdir
paths = ["Plugins", "settings.ini", "blocks.json", "world/world.ini"]
for p in paths:
src = os.path.join(old_workdir, p)
dst = os.path.join(workdir, p)
if os.path.isfile(src):
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copy(src, dst)
elif os.path.isdir(src):
shutil.copytree(src, dst)
# Remove logging plugin, add recovery plugin
settings_ini = os.path.join(workdir, "settings.ini")
plugins_dir = os.path.join(workdir, "Plugins")
recovery_plugin_dir = os.path.join(plugins_dir, PLUGIN_NAME)
edit_cuberite_config.remove_plugin(settings_ini, "logging")
edit_cuberite_config.add_plugin(settings_ini, PLUGIN_NAME)
if not os.path.isdir(recovery_plugin_dir):
shutil.copytree(
os.path.join(repo_home, "server/cuberite_plugins", PLUGIN_NAME), recovery_plugin_dir
)
# Read logging.bin to get chunks available, and rewrite recovery plugin
chunks = get_chunks_avail(old_workdir)
chunks_lua = tuple_list_to_lua(chunks)
with open(os.path.join(recovery_plugin_dir, "recover_initial.lua"), "r") as f:
recovery_lua = f.read()
recovery_lua = recovery_lua.replace("__CHUNKS_TO_LOAD__", chunks_lua)
with open(os.path.join(recovery_plugin_dir, "recover_initial.lua"), "w") as f:
f.write(recovery_lua)
# Start craftassist_cuberite_utils and wait until the plugin kills it
p = subprocess.Popen(
[repo_home + "/server/craftassist_cuberite_utils/Server/Cuberite"], cwd=workdir
)
p.wait()
# Return folder containing region files
return os.path.join(workdir, "world/region")
def get_chunks_avail(logdir):
chunks = []
class ChunkAvailLogReader(BaseLogReader):
def on_chunk_available(self, buf_start, hid, cx, cz):
chunks.append((cx, cz))
ChunkAvailLogReader(logdir).start()
return chunks
def tuple_list_to_lua(tuple_list):
"""Given a list of tuples, return a lua table of tables"""
def table(it):
return "{" + ",".join(map(str, it)) + "}"
return table(table(t) for t in tuple_list)
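# For example (illustrative sketch, not part of the original plugin flow):
# tuple_list_to_lua([(1, 2), (3, 4)]) returns "{{1,2},{3,4}}", the form the
# recovery plugin expects for __CHUNKS_TO_LOAD__.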
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("workdir")
args = parser.parse_args()
recover_initial_blockmap(args.workdir)
|
StarcoderdataPython
|
6684682
|
from bank_bot.bankbot.core import bot, client_factory, safe_send_message
from bank_bot import settings
from bank_bot.banking_system import UserError, TransactionError, Database, HackerError, MessageError, AddressRecordError
# HACK_SUBSYSTEM
@bot.message_handler(commands=['hacker_help',])
def hacker_help_message(message):
# Help command
client = client_factory.create_client(message)
try:
client.hacker_validation()
bot.reply_to(message, settings.HACKER_HELP_MESSAGE)
return
except (UserError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
return
if settings.HACKING_ALLOWED:
@bot.message_handler(regexp=r"^\/h@ck_user [a-zA-Z0-9]{10}")
def hack_user(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, show_sender = client.hack_inspect_user(message.text)
except (UserError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.FAILED_HACK_ALERT.substitute(data_type=settings.USER_DATA)
bot.send_message(err.victim_chat_id, failed_hack_message)
return
bot.send_message(client.chat_id, results)
if show_sender:
hack_message = settings.HACK_ALERT.substitute(data_type=settings.USER_DATA, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"^\/h@ck_history_sent [a-zA-Z0-9]{10}")
def hack_user_sent_transaction_list(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, show_sender = client.hack_inspect_transactions(message.text, is_sender=True)
except (UserError, TransactionError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.FAILED_HACK_ALERT.substitute(data_type=settings.SENT_TRANSACTIONS_DATA)
bot.send_message(err.victim_chat_id, failed_hack_message)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
hack_message = settings.HACK_ALERT.substitute(data_type=settings.SENT_TRANSACTIONS_DATA, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"^\/h@ck_history_recieved [a-zA-Z0-9]{10}")
def hack_user_recieved_transaction_list(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, show_sender = client.hack_inspect_transactions(message.text, is_sender=False)
except (UserError, TransactionError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.FAILED_HACK_ALERT.substitute(data_type=settings.RECIEVED_TRANSACTIONS_DATA)
bot.send_message(err.victim_chat_id, failed_hack_message)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
hack_message = settings.HACK_ALERT.substitute(data_type=settings.RECIEVED_TRANSACTIONS_DATA, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"^\/h@ck_history_all [a-zA-Z0-9]{10}")
def hack_list_all_transactions(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, show_sender = client.hack_inspect_all_transactions(message.text)
except (UserError, TransactionError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.FAILED_HACK_ALERT.substitute(data_type=settings.USER_DATA)
bot.send_message(err.victim_chat_id, failed_hack_message)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
hack_message = settings.HACK_ALERT.substitute(data_type=settings.TRANSACTIONS_DATA_HISTORY, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"^\/h@ck_history_pair [a-zA-Z0-9]{10} [a-zA-Z0-9]{10}")
def hack_list_pair_transactions(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, victim_hash, second_victim_chat_id, second_victim_hash, show_sender = client.hack_inspect_pair_history(message.text)
except (UserError, TransactionError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
first_victim_chat_id, second_victim_chat_id = err.victim_chat_id.split('|||')
failed_hack_message_first = settings.FAILED_HACK_ALERT.substitute(data_type=settings.HACK_TRANSACTIONS_OTHER_USER)
bot.send_message(first_victim_chat_id, failed_hack_message_first)
bot.send_message(second_victim_chat_id, failed_hack_message_first)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
first_transaction_pair = settings.TRANSACTION_PAIR.substitute(second_user=second_victim_hash)
second_transaction_pair = settings.TRANSACTION_PAIR.substitute(second_user=victim_hash)
hack_message_first = settings.HACK_ALERT.substitute(data_type=first_transaction_pair, hacker_hash=client.user.character_hash)
hack_message_second = settings.HACK_ALERT.substitute(data_type=second_transaction_pair, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message_first)
bot.send_message(second_victim_chat_id, hack_message_second)
@bot.message_handler(regexp=r"^\/h@ck_history_messages_sent [a-zA-Z0-9]{10}")
def hack_user_sent_messages_list(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, show_sender = client.hack_inspect_messages(message.text, is_sender=True)
except (UserError, TransactionError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.FAILED_HACK_ALERT.substitute(data_type=settings.SENT_MESSAGES_DATA)
bot.send_message(err.victim_chat_id, failed_hack_message)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
hack_message = settings.HACK_ALERT.substitute(data_type=settings.SENT_MESSAGES_DATA, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"^\/h@ck_history_messages_recieved [a-zA-Z0-9]{10}")
def hack_user_recieved_messages_list(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, show_sender = client.hack_inspect_messages(message.text, is_sender=False)
except (UserError, MessageError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.FAILED_HACK_ALERT.substitute(data_type=settings.SENT_MESSAGES_DATA)
bot.send_message(err.victim_chat_id, failed_hack_message)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
hack_message = settings.HACK_ALERT.substitute(data_type=settings.RECIEVED_MESSAGES_DATA, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"^\/h@ck_history_messages [a-zA-Z0-9]{10}")
def hack_list_all_messages(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, show_sender = client.hack_inspect_all_messages(message.text)
except (UserError, MessageError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.FAILED_HACK_ALERT.substitute(data_type=settings.MESSAGES_DATA_HISTORY)
bot.send_message(err.victim_chat_id, failed_hack_message)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
hack_message = settings.HACK_ALERT.substitute(data_type=settings.MESSAGES_DATA_HISTORY, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"^\/h@ck_history_messages_pair [a-zA-Z0-9]{10} [a-zA-Z0-9]{10}")
def hack_list_pair_message(message):
client = client_factory.create_client(message)
try:
results, victim_chat_id, victim_hash, second_victim_chat_id, second_victim_hash, show_sender = client.hack_inspect_pair_history_messages(message.text)
except (UserError, MessageError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
first_victim_chat_id, second_victim_chat_id = err.victim_chat_id.split('|||')
failed_hack_message_first = settings.FAILED_HACK_ALERT.substitute(data_type=settings.HACK_MESSAGES_OTHER_USER)
bot.send_message(first_victim_chat_id, failed_hack_message_first)
bot.send_message(second_victim_chat_id, failed_hack_message_first)
return
safe_send_message(bot, client.chat_id, results)
if show_sender:
first_transaction_pair = settings.MESSAGES_PAIR.substitute(second_user=second_victim_hash)
second_transaction_pair = settings.MESSAGES_PAIR.substitute(second_user=victim_hash)
hack_message_first = settings.HACK_ALERT.substitute(data_type=first_transaction_pair, hacker_hash=client.user.character_hash)
hack_message_second = settings.HACK_ALERT.substitute(data_type=second_transaction_pair, hacker_hash=client.user.character_hash)
bot.send_message(victim_chat_id, hack_message_first)
bot.send_message(second_victim_chat_id, hack_message_second)
@bot.message_handler(regexp=r"^\/h@ck_theft_other [a-zA-Z0-9]{10} [a-zA-Z0-9]{10} [0-9.]+")
def create_hacked_transaction_other(message):
client = client_factory.create_client(message)
try:
hacker_hash, victim_chat_id, reciever_chat_id, transaction_message, show_hack = client.create_hacker_transaction_other(message.text)
except (UserError, TransactionError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.HACK_FAILED_THEFT_ALERT
bot.send_message(err.victim_chat_id, failed_hack_message)
return
bot.send_message(reciever_chat_id, transaction_message)
if show_hack:
hack_message = settings.HACK_THEFT_ALERT.substitute(hacker_hash=hacker_hash)
bot.send_message(victim_chat_id, transaction_message)
bot.send_message(victim_chat_id, hack_message)
@bot.message_handler(regexp=r"\/h@ck_message [a-zA-Z0-9]{10} [\w\W]+")
def send_hacked_message(message):
# Generic messaging command; allows sending any message to another user registered in the bot.
# Only the user's unique hash is required to send a message; the message is signed by the sender's hash.
client = client_factory.create_client(message)
try:
reciever_chat_id, sent_message, show_hack = client.prepare_hacker_message(message.text)
except (UserError, MessageError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
return
safe_send_message(bot, client.chat_id, f"{settings.MESSAGE_SEND_RESULT} {sent_message}")
if show_hack:
safe_send_message(bot, reciever_chat_id, f"{settings.INCOMING_MESSAGE} {sent_message}.\n{settings.MESSAGE_SENDER} {client.user.character_hash}")
else:
safe_send_message(bot, reciever_chat_id, f"{settings.INCOMING_MESSAGE} {sent_message}.\n{settings.MESSAGE_SENDER} {settings.HACKER_FAKE_HASH}")
@bot.message_handler(regexp=r"^\/h@ck_theft [a-zA-Z0-9]{10} [0-9.]+")
def create_hacked_transaction(message):
client = client_factory.create_client(message)
try:
hacker_chat_id, hacker_hash, victim_chat_id, transaction_message, show_hack = client.create_hacker_transaction(message.text)
except (UserError, TransactionError, HackerError) as err:
bot.send_message(client.chat_id, err.message)
if isinstance(err, HackerError) and err.low_level:
failed_hack_message = settings.HACK_FAILED_THEFT_ALERT
bot.send_message(err.victim_chat_id, failed_hack_message)
return
bot.send_message(hacker_chat_id, transaction_message)
if show_hack:
hack_message = settings.HACK_THEFT_ALERT.substitute(hacker_hash=hacker_hash)
bot.send_message(victim_chat_id, transaction_message)
bot.send_message(victim_chat_id, hack_message)
|
StarcoderdataPython
|
12835342
|
# External module imports
import RPi.GPIO as GPIO
# Sensor that checks whether water levels have gone too low
class WaterLevelSensor:
# Store which pin receives info from the water level sensor
def __init__(self, pin):
self.pin = pin
self.is_too_low = False
# Check to see if the water level is too low
def check_water_level(self):
print('pin {} is {}'.format(self.pin, GPIO.input(self.pin)))
if GPIO.input(self.pin) == GPIO.HIGH:
self.is_too_low = True
else:
self.is_too_low = False
return self.is_too_low
|
StarcoderdataPython
|
3567783
|
# Date: 06/07/2018
# Author: Pure-L0G1C
# Description: Interface for the master
from os import path
from re import match
from lib import const
from . import ssh, sftp
from hashlib import sha256
from time import time, sleep
from os import urandom
from threading import Thread
from datetime import datetime
class FTP(object):
def __init__(self, file, bot, download=True):
self.sftp = sftp.sFTP(const.PRIVATE_IP, const.FTP_PORT, max_time=60, verbose=True)
self.bot_id = bot['bot_id']
self.shell = bot['shell']
self.download = download
self.is_alive = False
self.success = False
self.time = None
self.file = file
def send(self, code, file=None):
if not path.exists(file):return
self.shell.send(code=code, args=file)
self.is_alive = True
self.sftp.send(file)
self.is_alive = False
self.time = self.sftp.time_elapsed
self.success = True if self.sftp.error_code != -1 else False
def recv(self, code, file=None):
self.shell.send(code=code, args=file)
self.is_alive = True
self.sftp.recv()
self.is_alive = False
self.time = self.sftp.time_elapsed
self.success = True if self.sftp.error_code != -1 else False
def close(self):
self.sftp.close()
######## Tasks #########
class Task(object):
def __init__(self, task_id, task_args, task_info_obj):
self.id = task_id
self.args = task_args
self.task_info_obj = task_info_obj
def start(self, bots):
for bot in [bots[bot] for bot in bots]:
bot['shell'].send(10, (self.id, self.args))
def stop(self, bots):
for bot in [bots[bot] for bot in bots]:
bot['shell'].send(11)
class TaskDdos(object):
def __init__(self, target, threads):
self.target = target
self.threads = threads
self.time_assigned = time()
def info(self):
time_assigned = datetime.fromtimestamp(self.time_assigned).strftime('%b %d, %Y at %I:%M %p')
a = 'Task name: Ddos Attack\nTime assigned: {}\n\n'.format(time_assigned)
b = 'Target: {}\nThreads: {}'.format(self.target, self.threads)
return a + b
######## Interface ########
class Interface(object):
def __init__(self):
self.bots = {}
self.ssh = None
self.ftp = None
self.task = None
self.sig = self.signature
def close(self):
if self.ftp:
self.ftp.close()
self.ftp = None
if self.ssh:
self.ssh.close()
self.ssh = None
self.disconnect_all()
def gen_bot_id(self, uuid):
bot_ids = [self.bots[bot]['bot_id'] for bot in self.bots]
while 1:
bot_id = sha256((sha256(urandom(64 * 32) + urandom(64 * 64)).digest().hex() + uuid).encode()).digest().hex()
if bot_id not in bot_ids: break
return bot_id
@property
def signature(self):
bots = b''
for bot in self.bots:
bot_id = self.bots[bot]['bot_id']
bot_id = bot_id[:8] + bot_id[-8:]
bots += bot_id.encode()
return sha256(bots).digest().hex()
def is_connected(self, uuid):
for bot in self.bots:
if self.bots[bot]['uuid'] == uuid:
return True
return False
def connect_client(self, sess_obj, conn_info, shell):
uuid = conn_info['args']['sys_info']['uuid']
if self.is_connected(uuid):
self.close_sess(sess_obj, shell)
else:
bot_id = self.gen_bot_id(uuid)
self.bots[sess_obj] = { 'bot_id': bot_id, 'uuid': uuid, 'intel': conn_info['args'], 'shell': shell, 'session': sess_obj }
self.sig = self.signature
print(self.bots)
if self.task:
shell.send(10, (self.task.id, self.task.args))
def close_sess(self, sess_obj, shell_obj):
print('Closing session ...')
shell_obj.is_alive = False
shell_obj.send(code=7, args=None) # 7 - disconnect
sess_obj.close()
if sess_obj in self.bots:
del self.bots[sess_obj]
self.sig = self.signature
def disconnect_client(self, sess_obj):
print('Disconnecting client ...')
if sess_obj in self.bots:
self.bots[sess_obj]['shell'].is_alive = False
bot_id = self.bots[sess_obj]['bot_id']
if self.ftp:
if self.ftp.bot_id == bot_id:
self.ftp.close()
self.ftp = None
self.close_sess(sess_obj, self.bots[sess_obj]['shell'])
self.sig = self.signature
def disconnect_all(self):
for bot in [self.bots[bot] for bot in self.bots]:
bot['session'].close()
self.sig = self.signature
def get_bot(self, bot_id):
for bot in self.bots:
if self.bots[bot]['bot_id'] == bot_id:
return self.bots[bot]
def ssh_obj(self, bot_id):
bot = self.get_bot(bot_id)
if bot:
if self.ssh:
self.ssh.close()
self.ssh = ssh.SSH(const.PRIVATE_IP, const.SSH_PORT, max_time=30, verbose=True)
sock_obj = self.ssh.start()
if sock_obj:
t = Thread(target=self.ssh.serve, args=[sock_obj])
t.daemon = True
t.start()
bot['session'].send(code=1)
return self.ssh
else:
self.ssh.close()
self.ssh = None
def ssh_exe(self, cmd):
return self.ssh.send(cmd)
def ftp_obj(self, bot_id, cmd_id, file, override):
bot = self.get_bot(bot_id)
if not bot:
return ''
if cmd_id == 3:
if not path.exists(file):
return 'Upload process failed; the file {} was not found'.format(file)
if self.ftp:
if all([self.ftp.is_alive, not override]):
return 'Already {} {} {} {}. Use --override option to override this process'.format('Downloading' if self.ftp.download else 'Uploading',
self.ftp.file, 'from' if self.ftp.download else 'to', self.ftp.bot_id[:8])
else:
self.ftp.close()
self.ftp = ftp_obj = FTP(file, bot, download=False if cmd_id == 3 else True)
ftp_func = self.ftp.send if cmd_id == 3 else self.ftp.recv
ftp_thread = Thread(target=ftp_func, args=[cmd_id, file])
ftp_thread.daemon = True
ftp_thread.start()
return '{} process started successfully'.format('Download' if self.ftp.download else 'Upload')
def ftp_status(self):
if not self.ftp:
return 'No file transfer in progress'
if self.ftp.is_alive:
return '{} {} {} {}. Check back in 1 minute'.format('Downloading' if self.ftp.download else 'Uploading',
self.ftp.file, 'from' if self.ftp.download else 'to', self.ftp.bot_id[:8])
else:
return 'Attempted to {} {} {} {}. The process {} a success. Time-elapsed: {}(sec)'.format('download' if self.ftp.download else 'upload',
self.ftp.file, 'from' if self.ftp.download else 'to',
self.ftp.bot_id[:8], 'was' if self.ftp.success else 'was not', self.ftp.time)
def execute_cmd_by_id(self, bot_id, cmd_id, args):
override = True if '--override' in args else False
if not cmd_id.isdigit():
return 'Failed to send command'
cmd_id = int(cmd_id)
if override:
args.pop(args.index('--override'))
if cmd_id == 1:
return self.ftp_status()
elif any([cmd_id == 3, cmd_id == 4, cmd_id == 5]):
return self.ftp_obj(bot_id, cmd_id, ' '.join(args[0:]) if cmd_id != 5 else 'a screenshot', override)
else:
bot = self.get_bot(bot_id)
if bot:
bot['shell'].send(code=cmd_id, args=args)
if cmd_id == 12:
if not bot['shell'].keylogging:
bot['shell'].keylogging = True
else:
return 'Keylogger is already active'
if cmd_id == 13:
if bot['shell'].keylogging:
bot['shell'].keylogging = False
else:
return 'Keylogger is already inactive'
if all([cmd_id == 14, not bot['shell'].keylogging]):
return 'Keylogger is inactive'
return self.keystrokes(bot['shell']) if cmd_id == 14 else 'Command sent successfully'
return 'Failed to send command'
def keystrokes(self, bot_shell):
while all([bot_shell.is_alive, not bot_shell.keystrokes]):
pass
try:
if all([bot_shell.is_alive, bot_shell.keystrokes]):
keystrokes = bot_shell.keystrokes
bot_shell.keystrokes = None
return keystrokes if keystrokes != '-1' else ''
except:
pass
def start_task(self):
Thread(target=self.task.start, args=[self.bots], daemon=True).start()
def stop_task(self):
if self.task:
t = Thread(target=self.task.stop, args=[self.bots], daemon=True)
t.start()
t.join()
self.task = None
def execute_cmd_by_task_id(self, cmd_id, args):
if not cmd_id.isdigit():
return 'Failed to send command'
cmd_id = int(cmd_id)
if cmd_id == 0: # stop task
Thread(target=self.stop_task, daemon=True).start()
return 'Task terminated' if self.task else 'No task is set'
elif cmd_id == 1: # status
return self.get_task()
else:
resp = self.set_task(cmd_id, args)
if resp == True:
self.start_task()
return 'Task set successfully'
else:
return resp
def get_task(self):
return 'No task is set' if not self.task else self.task.task_info_obj.info()
def set_task(self, task_id, args):
if task_id == 2: # ddos
return self.set_ddos_task(args)
else:
return 'Failed to set task'
def set_ddos_task(self, args):
task_id = 1 # task id on the bot side
if not len(args) == 3:
return 'Invalid amount of arguments'
ip, port, threads = args
if not self.valid_ip(ip):
return 'Invalid IP address'
if not self.valid_port(port):
return 'Invalid port'
if not self.valid_thread(threads):
return 'Invalid thread'
task_info_obj = TaskDdos('{}:{}'.format(ip, port), threads)
self.task = Task(task_id, (ip, int(port), int(threads)), task_info_obj)
return True
def valid_thread(self, thread):
return True if thread.isdigit() else False
def valid_ip(self, ip):
return False if not match(r'^(?!0)(?!.*\.$)((1?\d?\d|25[0-5]|2[0-4]\d)(\.|$)){4}$', ip) else True
def valid_port(self, port):
_port = str(port).strip()
if not len(_port):
return False
else:
# check if number
for item in _port:
if not item.isdigit():
return False
# check if number starts with a zero
if int(_port[0]) == 0:
return False
# check if number is larger than 65535
if int(_port) > 65535:
return False
return True
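# Hedged usage sketch (not part of the original module): the validators above can
# be exercised directly; the sample addresses are illustrative only.
# Interface().valid_ip('192.168.0.12') -> True
# Interface().valid_ip('256.1.1.1')    -> False
# Interface().valid_port('8080')       -> True
# Interface().valid_port('070')        -> False (leading zero is rejected)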
|
StarcoderdataPython
|
6461258
|
<gh_stars>0
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import json
from myuw.test.api import MyuwApiTest, require_url, fdao_hfs_override
@fdao_hfs_override
@require_url('myuw_hfs_api')
class TestHFS(MyuwApiTest):
def get_hfs_api_response(self):
return self.get_response_by_reverse('myuw_hfs_api')
def test_javerage(self):
self.set_user('javerage')
response = self.get_hfs_api_response()
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data["employee_husky_card"]["balance"], 1)
self.assertEquals(data["resident_dining"]["balance"], 5.1)
self.assertEquals(data["student_husky_card"]["balance"], 1.23)
def test_bad_user(self):
self.set_user('err-user')
response = self.get_hfs_api_response()
self.assertEquals(response.status_code, 404)
self.set_user('none')
response = self.get_hfs_api_response()
self.assertEquals(response.status_code, 404)
def test_error(self):
self.set_user('jerror')
response = self.get_hfs_api_response()
self.assertEquals(response.status_code, 543)
def test_eight(self):
self.set_user('eight')
response = self.get_hfs_api_response()
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertIsNone(data["employee_husky_card"])
self.assertEquals(data["resident_dining"]["balance"], 15.1)
self.assertEquals(data["student_husky_card"]["balance"], 100.23)
|
StarcoderdataPython
|
3326358
|
<filename>saleor/graphql/meta/types.py
import graphene
from graphene.types.generic import GenericScalar
from ...core.models import ModelWithMetadata
from ..channel import ChannelContext
from ..core.descriptions import ADDED_IN_33, PREVIEW_FEATURE
from ..core.types import NonNullList
from .resolvers import (
check_private_metadata_privilege,
resolve_metadata,
resolve_object_with_metadata_type,
resolve_private_metadata,
)
class MetadataItem(graphene.ObjectType):
key = graphene.String(required=True, description="Key of a metadata item.")
value = graphene.String(required=True, description="Value of a metadata item.")
class Metadata(GenericScalar):
"""Metadata is a map of key-value pairs, both keys and values are `String`.
Example:
```
{
"key1": "value1",
"key2": "value2"
}
```
"""
def _filter_metadata(metadata, keys):
if keys is None:
return metadata
return {key: value for key, value in metadata.items() if key in keys}
class ObjectWithMetadata(graphene.Interface):
private_metadata = NonNullList(
MetadataItem,
required=True,
description=(
"List of private metadata items. Requires staff permissions to access."
),
)
private_metafield = graphene.String(
args={"key": graphene.NonNull(graphene.String)},
description=(
"A single key from private metadata. "
"Requires staff permissions to access.\n\n"
"Tip: Use GraphQL aliases to fetch multiple keys."
+ ADDED_IN_33
+ PREVIEW_FEATURE
),
)
private_metafields = Metadata(
args={"keys": NonNullList(graphene.String)},
description=(
"Private metadata. Requires staff permissions to access. "
"Use `keys` to control which fields you want to include. "
"The default is to include everything." + ADDED_IN_33 + PREVIEW_FEATURE
),
)
metadata = NonNullList(
MetadataItem,
required=True,
description=(
"List of public metadata items. Can be accessed without permissions."
),
)
metafield = graphene.String(
args={"key": graphene.NonNull(graphene.String)},
description=(
"A single key from public metadata.\n\n"
"Tip: Use GraphQL aliases to fetch multiple keys."
+ ADDED_IN_33
+ PREVIEW_FEATURE
),
)
metafields = Metadata(
args={"keys": NonNullList(graphene.String)},
description=(
"Public metadata. Use `keys` to control which fields you want to include. "
"The default is to include everything." + ADDED_IN_33 + PREVIEW_FEATURE
),
)
@staticmethod
def resolve_metadata(root: ModelWithMetadata, _info):
return resolve_metadata(root.metadata)
@staticmethod
def resolve_metafield(root: ModelWithMetadata, _info, *, key: str):
return root.metadata.get(key)
@staticmethod
def resolve_metafields(root: ModelWithMetadata, _info, *, keys=None):
return _filter_metadata(root.metadata, keys)
@staticmethod
def resolve_private_metadata(root: ModelWithMetadata, info):
return resolve_private_metadata(root, info)
@staticmethod
def resolve_private_metafield(root: ModelWithMetadata, info, *, key: str):
check_private_metadata_privilege(root, info)
return root.private_metadata.get(key)
@staticmethod
def resolve_private_metafields(root: ModelWithMetadata, info, *, keys=None):
check_private_metadata_privilege(root, info)
return _filter_metadata(root.private_metadata, keys)
@classmethod
def resolve_type(cls, instance: ModelWithMetadata, _info):
if isinstance(instance, ChannelContext):
# Return instance for types that use ChannelContext
instance = instance.node
item_type, _ = resolve_object_with_metadata_type(instance)
return item_type
|
StarcoderdataPython
|
12850401
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for batch prediction pipeline."""
import argparse
from absl import logging
from kfp.v2.google import client
def run_training_pipeline():
"""Main function for batch prediction pipeline runner."""
parser = argparse.ArgumentParser()
parser.add_argument('--project_id', type=str)
parser.add_argument('--pipeline_region', type=str)
parser.add_argument('--pipeline_root', type=str)
parser.add_argument('--pipeline_job_spec_path', type=str)
# Staging path for running custom job
parser.add_argument('--data_pipeline_root', type=str)
# Parameters required for data ingestion and processing
parser.add_argument('--input_dataset_uri', type=str)
parser.add_argument('--gcs_data_output_folder', type=str)
parser.add_argument('--data_region', type=str)
parser.add_argument('--gcs_result_folder', type=str)
# Parameters required for training job
parser.add_argument('--model_resource_name', type=str, default='')
parser.add_argument('--endpoint_resource_name', type=str, default='')
# Parameters required for batch prediction job
parser.add_argument('--machine_type', type=str, default='n1-standard-4')
parser.add_argument('--accelerator_count', type=int, default=0)
parser.add_argument('--accelerator_type',
type=str, default='ACCELERATOR_TYPE_UNSPECIFIED')
parser.add_argument('--starting_replica_count', type=int, default=1)
parser.add_argument('--max_replica_count', type=int, default=2)
# Parameters required for pipeline scheduling
parser.add_argument('--pipeline_schedule',
type=str, default='', help='0 2 * * *')
parser.add_argument('--pipeline_schedule_timezone',
type=str, default='US/Pacific')
parser.add_argument('--enable_pipeline_caching',
action='store_true',
default=False,
help='Specify whether to enable caching.')
args, _ = parser.parse_known_args()
logging.info(args)
api_client = client.AIPlatformClient(args.project_id, args.pipeline_region)
params_to_remove = ['pipeline_region', 'pipeline_root',
'pipeline_job_spec_path', 'pipeline_schedule',
'pipeline_schedule_timezone', 'enable_pipeline_caching']
pipeline_params = vars(args).copy()
for item in params_to_remove:
pipeline_params.pop(item, None)
if not args.pipeline_schedule:
api_client.create_run_from_job_spec(
args.pipeline_job_spec_path,
pipeline_root=args.pipeline_root,
parameter_values=pipeline_params,
enable_caching=args.enable_pipeline_caching
)
else:
api_client.create_schedule_from_job_spec(
args.pipeline_job_spec_path,
schedule=args.pipeline_schedule,
time_zone=args.pipeline_schedule_timezone,
pipeline_root=args.pipeline_root,
parameter_values=pipeline_params,
enable_caching=args.enable_pipeline_caching
)
if __name__ == '__main__':
run_training_pipeline()
|
StarcoderdataPython
|
4859064
|
<reponame>albertcrowley/srt-fbo-scraper<filename>utils/sam_utils.py
from datetime import datetime as dt
from datetime import timedelta
from io import BytesIO
import logging
import os
import sys
import zipfile
import requests
from .request_utils import requests_retry_session, get_org_request_details
logger = logging.getLogger(__name__)
def get_org_info(org_id):
uri, params = get_org_request_details()
params.update({'fhorgid':org_id})
try:
with requests_retry_session() as session:
r = session.get(uri, params = params, timeout = 100)
except Exception as e:
logger.error(f"Exception {e} getting org info from {uri} with these params:\n\
{params}", exc_info=True)
sys.exit(1)
data = r.json()
org_list = data['orglist']
try:
first_org_record = org_list[0]
except IndexError:
return '',''
agency = first_org_record.get('fhagencyorgname','')
office = first_org_record.get('fhorgname','')
return agency, office
def write_zip_content(content, out_path):
"""Writes the bytes content of a request for a zip archive to out_path
Arguments:
content {bytes} -- binary response content (i.e. r.content)
out_path {str} -- directory to write zip files to
"""
textract_ext = ('.doc','.docx','.epub','.gif','.htm','.html','.odt','.pdf','.rtf','.txt')
z = zipfile.ZipFile(BytesIO(content))
unzipped_file_list = z.filelist
if not unzipped_file_list:
#if the archive's corrupted, this list is empty
return
try:
z.extractall(out_path)
except RuntimeError:
#occurs on password protected archives
return
file_list = []
for f in unzipped_file_list:
try:
file_name = f.filename
if not file_name.endswith('/'):
file_out_path = os.path.join(out_path, file_name)
if file_out_path.endswith(textract_ext):
file_list.append(file_out_path)
else:
#capturing as non-machine
file_list.append(file_out_path)
except AttributeError:
pass
file_list = [os.path.join(out_path, os.path.basename(f)) for f in file_list]
return file_list
def get_notice_data(opp_data, opp_id):
poc = opp_data.get('pointOfContacts')
if not poc:
emails = []
else:
emails = [p.get('email') for p in poc if p.get('email')]
#classification_code = opp_data.get('classificationCode','')
# will revisit to document missing "classification code"
try:
classification_code = opp_data.get('psc','')[0].get('code','')
except IndexError:
classification_code = 0
naics = max([i for naics_list in
[i.get('code') for i in opp_data.get('naics',{})]
for i in naics_list], key = len)
subject = opp_data.get('title','').title()
url = f'https://beta.sam.gov/opp/{opp_id}/view'
# set_aside = opp_data.get('solicitation',{}).get('setAside','')
set_aside = opp_data.get('typeOfSetAside', '')
notice_data = {'classcod': classification_code,
'naics': naics,
'subject': subject,
'url': url,
'setaside': set_aside,
'emails': emails}
return notice_data
def get_notice_type(notice_type_code):
sam_nt_map = {'o':'Solicitation',
'p':'Presolicitation',
'k':'Combined Synopsis/Solicitation'}
sam_notice_type = sam_nt_map.get(notice_type_code, '').title()
if not sam_notice_type:
other_codes = {'r':'Sources Sought',
'g':'Sale of Surplus Property',
's':'Special Notice',
'i':'Intent to Bundle Requirements (DoD- Funded)',
'a':'Award Notice',
'u':'Justification and Authorization'}
if notice_type_code not in other_codes:
logger.warning(f"Found an unanticipated notice type with code: {notice_type_code}")
return
return
return sam_notice_type
def schematize_opp(opp):
#opp_id = opp.get('opportunityId')
opp_id = opp.get('_id')
if not opp_id:
logger.warning(f"No opp_id for {opp}")
return
#opp_data = opp.get('data')
opp_data = opp
if not opp_data:
return
#notice_type_code = opp_data.get('type')
notice_type_code = opp_data.get('type')['value']
#notice_type = get_notice_type(notice_type_code)
notice_type = notice_type_code
if not notice_type:
return
#org_id = opp_data.get('organizationId')
agency =opp_data.get('organizationHierarchy','')[0].get('name','')
office =opp_data.get('organizationHierarchy','')[1].get('name','')
solicitation_number = opp_data.get('solicitationNumber','')
#agency, office = get_org_info(org_id)
#agency = opp_data
required_data = {'notice type': notice_type,
'solnbr': solicitation_number,
'agency': agency,
'compliant': 0,
'office': office,
'attachments': []}
notice_data = get_notice_data(opp_data, opp_id)
schematized_opp = {**required_data, **notice_data}
schematized_opp['opp_id'] = opp_id
return schematized_opp
def naics_filter(opps):
"""Filter out opps without desired naics
Arguments:
opps {list} -- a list of sam opportunity api results
naics {list} -- a list of naics to filter with
Returns:
[list] -- a subset of results with matching naics
"""
naics = ('334111', '334118', '3343', '33451', '334516', '334614',
'5112', '518', '54169', '54121', '5415', '54169', '61142')
filtered_opps = []
for opp in opps:
#naics_array = opp.get('data',{}).get('naics')
naics_array = opp.get('naics',{})
if not naics_array:
continue
nested_naics_codes = [c for c in [d.get('code',[]) for d in naics_array]]
#opp_naics = [i for sublist in nested_naics_codes for i in sublist]
opp_naics = [i for i in nested_naics_codes ]
for c in opp_naics:
if any(c.startswith(n) for n in naics):
filtered_opps.append(opp)
break
return filtered_opps
def get_dates_from_opp(opp):
mod_date = opp.get('modifiedDate','')
if "T" in mod_date:
modified_date = mod_date.split('T')[0]
else:
modified_date = mod_date.split(' ')[0]
#post_date = opp.get('postedDate','')
post_date = opp.get('publishDate','')
if "T" in post_date:
posted_date = post_date.split('T')[0]
else:
posted_date = post_date.split(' ')[0]
posted_date_dt = None
modified_date_dt = None
try:
modified_date_dt = dt.strptime(modified_date, "%Y-%m-%d")
except ValueError:
pass
try:
posted_date_dt = dt.strptime(posted_date, "%Y-%m-%d")
except ValueError:
pass
return modified_date_dt, posted_date_dt
def get_day(today_or_yesterday):
if today_or_yesterday == 'today':
day = dt.strptime(dt.today().strftime("%Y-%m-%d"), "%Y-%m-%d")
elif today_or_yesterday == 'yesterday':
day = dt.strptime((dt.today() - timedelta(1)).strftime("%Y-%m-%d"), "%Y-%m-%d")
return day
def find_yesterdays_opps(opps):
yesterday = get_day('yesterday')
today = get_day('today')
yesterdays_opps = []
todays_opps = []
for i, opp in enumerate(opps):
modified_date_dt, posted_date_dt = get_dates_from_opp(opp)
is_mod_yesterday = modified_date_dt == yesterday
is_mod_today = modified_date_dt == today
is_post_yesterday = False
is_post_today = False
try:
is_post_yesterday = posted_date_dt == yesterday
is_post_today = posted_date_dt == today
except:
# some notices don't provide one of these dates, and we shouldn't guess
pass
if is_mod_yesterday or is_post_yesterday:
yesterdays_opps.append(opp)
elif is_mod_today or is_post_today:
todays_opps.append(opp)
else:
pass
# the entries are ordered by date so once it gets past yesterday we can stop
dateStr = opps[-1:][0]['modifiedDate'][0:10]
modDate = dt.strptime(dateStr, '%Y-%m-%d')
if modDate < get_day('yesterday'):
is_more_opps = False
else:
is_more_opps = True
return yesterdays_opps, is_more_opps
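# Hedged usage sketch (not part of the original module): naics_filter keeps an
# opportunity when any of its NAICS codes starts with one of the hard-coded
# prefixes above. For example, an opp whose payload includes
# {'naics': [{'code': '541511'}]} is kept because '541511'.startswith('5415');
# the exact payload shape is an assumption for illustration.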
|
StarcoderdataPython
|
6647701
|
from django.db.models import signals
from django.utils.functional import curry
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from django.contrib.admin.models import LogEntry
from django.contrib.sessions.models import Session
from django_extlog.models import ExtLog
class AuditLoggingMiddleware(object):
ip_address = None
def process_request(self, request):
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if hasattr(request, 'user') and request.user.is_authenticated():
user = request.user
else:
user = None
session = request.session.session_key
self.ip_address = request.META.get('REMOTE_ADDR', None)
update_post_save_info = curry(
self._update_post_save_info,
user,
session,
)
update_post_delete_info = curry(
self._update_post_delete_info,
user,
session,
)
signals.post_save.connect(
update_post_save_info,
dispatch_uid=(self.__class__, request,),
weak=False
)
signals.post_delete.connect(
update_post_delete_info,
dispatch_uid=(self.__class__, request,),
weak=False
)
def process_response(self, request, response):
signals.post_save.disconnect(dispatch_uid=(self.__class__, request,))
signals.post_delete.disconnect(dispatch_uid=(self.__class__, request,))
return response
def _save_to_log(self, instance, action, user):
content_type = ContentType.objects.get_for_model(instance)
if content_type.app_label != 'django_extlog' and user:
object_id = instance.id if hasattr(instance, 'id') else 0
if hasattr(object_id, 'hex'):
object_id = object_id.hex
ExtLog.objects.create(
object_id=object_id,
app_name=content_type.app_label,
model_name=content_type.model,
action=action,
# object_instance=serializers.serialize('json', [instance]),
user=user,
ip=self.ip_address,
)
def _update_post_save_info(
self,
user,
session,
sender,
instance,
**kwargs
):
if sender in [LogEntry, Session]:
return
if kwargs['created']:
self._save_to_log(instance, ExtLog.ACTION_TYPE_CREATE, user)
else:
self._save_to_log(instance, ExtLog.ACTION_TYPE_UPDATE, user)
def _update_post_delete_info(
self,
user,
session,
sender,
instance,
**kwargs
):
if sender in [LogEntry, Session]:
return
self._save_to_log(instance, ExtLog.ACTION_TYPE_DELETE, user)
|
StarcoderdataPython
|
6480955
|
<reponame>CGI-define-and-primeportal/trac-plugin-autocomplete<filename>autocompleteplugin/model.py
# coding: utf-8
#
# Copyright (c) 2010, Logica
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from trac.core import *
from trac.env import IEnvironmentSetupParticipant
from trac.db.schema import Table, Column
from trac.db.api import DatabaseManager
from trac.config import ListOption
from tracsqlhelper import execute_non_query, get_scalar, create_table
class AutoCompleteModel(Component):
implements(IEnvironmentSetupParticipant)
#Default values, read from trac.ini on creation
_default_autocomplete_values = ListOption('autocomplete', 'shown_groups',
'project_viewers,project_managers,project_members', doc=
"""User groups used in auto complete enabled inputs.""")
_default_autocomplete_name = _default_autocomplete_values.name
_default_autocomplete_description = _default_autocomplete_values.__doc__
#IEnvironmentSetupParticipant methods
def environment_created(self):
"""Called when a new Trac environment is created."""
self.upgrade_environment(self.env.get_db_cnx())
def environment_needs_upgrade(self, db):
"""Called when Trac checks whether the environment needs to be upgraded.
Should return `True` if this participant needs an upgrade to be
performed, `False` otherwise.
"""
version = self.version()
return version < len(self.steps)
def upgrade_environment(self, db):
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any database
transactions. This is done implicitly for each participant
if the upgrade succeeds without an error being raised.
However, if the `upgrade_environment` consists of small, restartable,
steps of upgrade, it can decide to commit on its own after each
successful step.
"""
if not self.environment_needs_upgrade(db):
return
version = self.version()
for version in range(self.version(), len(self.steps)):
for step in self.steps[version]:
step(self)
execute_non_query(self.env, """UPDATE SYSTEM SET value='%s' WHERE
name='autocompleteplugin.db_version';""" % len(self.steps))
def version(self):
"""Returns version of the database (an int)"""
version = get_scalar(self.env, """SELECT value FROM system WHERE name =
'autocompleteplugin.db_version';""")
if version:
return int(version)
return 0
### upgrade steps
def create_db(self):
autocomplete_table = Table('autocomplete', key='name')[
Column('name', 'text'),
Column('description', 'text')]
autocomplete_values_table = Table('autocomplete_values')[
Column('autocomplete_name', 'text'),
Column('value', 'text')]
create_table(self.env, autocomplete_table)
create_table(self.env, autocomplete_values_table)
execute_non_query(self.env, """INSERT INTO system (name, value) VALUES
('autocompleteplugin.db_version', '1');""")
def add_default_data(self):
#Add default autocomplete name
AutoCompleteGroup(self.env).add_autocomplete(self._default_autocomplete_name,
self._default_autocomplete_description)
#Add default autocomplete names
for value in self._default_autocomplete_values:
AutoCompleteGroup(self.env).add_autocomplete_name(self._default_autocomplete_name,
value)
def remove_data_from_config(self):
#Remove shown_groups from config
self.config.set('autocomplete', 'shown_groups', None)
self.config.save()
# ordered steps for upgrading
steps = [ [ create_db, add_default_data, remove_data_from_config ] # version 1
]
class AutoCompleteGroup(object):
"""Simple class for handling AutoComplete group values"""
def __init__(self, env):
self.env = env
def get_autocomplete_values(self, autocomplete_name):
"""Returns a list of values for the given autocomplete_name"""
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute('SELECT value FROM autocomplete_values WHERE autocomplete_name = %s',
(autocomplete_name,))
values = [value for value, in cursor]
return values
def add_autocomplete(self, autocomplete_name, description=None):
"""Adds an autocomplete section with description"""
db = self.env.get_db_cnx()
if not description:
description = ""
@self.env.with_transaction()
def do_save(db):
cursor = db.cursor()
cursor.execute('''INSERT INTO autocomplete(name, description) VALUES
(%s, %s)''',(autocomplete_name, description))
def add_autocomplete_name(self, autocomplete_name, value):
"""Adds an autocomplete value related to an autocomplete name"""
db = self.env.get_db_cnx()
relation_exists = self.check_if_section_name_exists(autocomplete_name, value)
@self.env.with_transaction()
def do_save(db):
cursor = db.cursor()
#Add data only if no relation exists
if not relation_exists:
cursor.execute('''INSERT INTO autocomplete_values(autocomplete_name,
value) VALUES (%s, %s)''',(autocomplete_name, value))
def remove_autocomplete_name(self, autocomplete_name, value):
"""Remove an autocomplete value related to an autocomplete name"""
db = self.env.get_db_cnx()
relation_exists = self.check_if_section_name_exists(autocomplete_name, value)
@self.env.with_transaction()
def do_delete(db):
#Remove only if relation exists
if relation_exists:
cursor = db.cursor()
cursor.execute('''DELETE FROM autocomplete_values WHERE
autocomplete_name = %s AND value = %s''',
(autocomplete_name, value))
def check_if_section_name_exists(self, autocomplete_name, value):
"""Checks if autocomplete_name name relation exists"""
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute('''SELECT autocomplete_name FROM autocomplete_values WHERE
autocomplete_name = %s AND value = %s''',
(autocomplete_name, value,))
if cursor.fetchone() is None:
return False
return True
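# ----------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the plugin): given a Trac `env`,
# AutoCompleteGroup can be used roughly as follows. The 'shown_groups' section
# matches the default created by the upgrade step above; the value added is
# hypothetical.
#
#   group = AutoCompleteGroup(env)
#   group.add_autocomplete('shown_groups', 'Groups used in auto complete inputs')
#   group.add_autocomplete_name('shown_groups', 'project_members')
#   assert 'project_members' in group.get_autocomplete_values('shown_groups')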
|
StarcoderdataPython
|
5119485
|
<gh_stars>0
# ============================================================================
# Copyright 2021.
#
#
# Author: <NAME>
# Contact: <EMAIL>, <EMAIL>
#
#
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow implementation of NVAE"""
import tensorflow as tf
import tensorflow.keras as tfk
import tensorflow_probability as tfp
import numpy as np
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import prefer_static
from conditional_distributions import DISTRIBUTION_LAYER_CLASS,kullback_leibler_divergence
from layers import LAYER_CLASS
from thirdparty.initializers_v2 import PytorchInit
__all__ = [
'VariationalLayer',
]
debug=False
class VariationalLayer(tfk.layers.Layer):
"""
*** Variational Layer ****
Args:
posterior_type: Class of distribution for the posterior.
prior_type: Class of distribution for the prior.
        latent_shape: Shape of the latent features of each layer.
posterior_hparams: The hyper-parameters for the posterior distribution.
prior_hparams: The hyper-parameters for prior distribution.
encoder_hparams: The hyper-parameters for the residual cell per layer of the encoder (bottom-up) model.
generator_hparams: The hyper-parameters for the residual cell per layer of the generative (top-down) model.
merge_generator_hparams: The hyper-parameters for the cell that merges the latent sample and the context of the previous layer of the generative model.
        merge_encoder_hparams: The hyper-parameters for the cell that merges the deterministic context of the next layer of the bottom-up model and the context of the previous layer of the top-down model for the bi-directional inference.
constant_generator: Flag indicating whether the variational layer is the first in the hierarchy, thus it is not receiving context from the previous layer.
generator_feature_shape: In case constant_generator=True, it indicates the shape of the constant tensor.
residual: Flag indicating whether the residual parametrization between the prior and the posterior will be used.
"""
def __init__(self,
posterior_type,
prior_type,
latent_shape,
posterior_hparams,
prior_hparams=None,
encoder_hparams=None,
generator_hparams=None,
merge_encoder_hparams=None,
merge_generator_hparams=None,
constant_generator=False,
generator_feature_shape=None,
residual=False,
**kwargs):
super(VariationalLayer, self).__init__(dtype=tf.float32,**kwargs)
self._latent_shape=latent_shape
self._posterior_type=posterior_type
self._prior_type=prior_type
self._residual=residual
self._posterior_hparams=posterior_hparams
self._prior_hparams=prior_hparams
self._encoder_hparams=encoder_hparams
self._generator_hparams=generator_hparams
self._constant_generator=constant_generator
self._merge_encoder_hparams=merge_encoder_hparams
self._merge_generator_hparams=merge_generator_hparams
if self._encoder_hparams is not None:
self._encoder_network=LAYER_CLASS[self._encoder_hparams.layer_type](hparams=self._encoder_hparams)
else:
self._encoder_network=tf.keras.layers.Lambda(lambda x:x)
if self._generator_hparams is not None:
if self._constant_generator:
## this is h in Figure 2. in [1].
self._generator_network=tfp.layers.VariableLayer(shape=prefer_static.concat([[1],generator_feature_shape], axis=0),
initializer=PytorchInit(),
)
else:
self._generator_network=LAYER_CLASS[self._generator_hparams.layer_type](hparams=self._generator_hparams)
else:
self._generator_network=tf.keras.layers.Lambda(lambda x:x)
if self._merge_generator_hparams is not None:
## it concatenates the stochastic feature and context of previous generative layer
self._merge_generator_network=LAYER_CLASS[self._merge_generator_hparams.layer_type](hparams=self._merge_generator_hparams)
else:
self._merge_generator_network=tf.keras.layers.Lambda(lambda x:tf.concat(x,axis=-1))
if self._merge_encoder_hparams is not None:
## it concatenates the stochastic feature of the previous generative layer with the deterministic features of the encoder of the current layer
self._merge_encoder_network=LAYER_CLASS[self._merge_encoder_hparams.layer_type](hparams=self._merge_encoder_hparams)
else:
self._merge_encoder_network=tf.keras.layers.Lambda(lambda x:tf.concat(x,axis=-1))
# build the posterior distribution
self._posterior_network=DISTRIBUTION_LAYER_CLASS[self._posterior_type](shape=self._latent_shape,
hparams=self._posterior_hparams,
)
# build the prior distribution
if self._prior_hparams is not None:
self._prior_network=DISTRIBUTION_LAYER_CLASS[self._prior_type](shape=self._latent_shape,
hparams=self._prior_hparams,
)
else:
self._prior_network=None
def compute_context(self, latent_condition=None, context=None,training=False,num_samples=1):
"""
method that combines the latent factor and the context of the previous generative layer
to compute the context of the current layer.
"""
if latent_condition is not None:
            latent_condition = tf.convert_to_tensor(latent_condition, dtype=self.dtype, name='previous_condition')
if context is not None:
context = tf.convert_to_tensor(context, dtype=self.dtype, name='context')
prev_context=self._merge_generator_network([context,latent_condition],training=False)#
else:
prev_context=latent_condition
## this is the blue 'r' block in Figure 2. in [1]
return self._generator_network(prev_context,training=training)
## this is the blue 'h' block in Figure 2. in [1]
if self._constant_generator:
context_condition=self._generator_network(0.0)
return tf.tile(context_condition,multiples=[num_samples,1,1,1])
else:
return None
def generate(self, latent_condition=None, context=None, num_samples=1):
"""
method that samples from the prior distribution of the layer.
"""
context_condition= self.compute_context(latent_condition=latent_condition, context=context,training=False,num_samples=num_samples)
if self._prior_network is not None:
xi=self._prior_network.sample(condition=context_condition,training=False,num_samples=num_samples)
else:
latent_shape=self._latent_shape
loc=np.zeros(latent_shape , dtype=np.float32)
scale_diag=np.ones(latent_shape , dtype=np.float32)
prior = tfp.distributions.MultivariateNormalDiag(loc=loc,scale_diag=scale_diag)
xi=prior.sample(sample_shape=num_samples)
xi=tf.reshape(xi,shape=prefer_static.concat([[-1], self._latent_shape], axis=0))
return xi, context_condition
def infer(self,condition,direction,training, latent_condition=None, context=None,num_samples=1,compute_q_logp=False,compute_p_logp=False):
"""
inference method for the layer.
[1]. <NAME>. and <NAME>., 2020. Nvae: A deep hierarchical variational autoencoder. arXiv preprint arXiv:2007.03898.
Inputs:
condition: deterministic tensor indicating the input from the next layer during the bottom-up pass.
direction: bottom-up for the deterministic pass of the encoder (bottom-up model) or top-down for the stochastic pass of the generator (top-down model).
context: context provided by the previous generative layer in the hierarchy.
            latent_condition: latent factor drawn from the previous variational layer. These are the z's in Figure 2 in [1].
num_samples: number of samples to be drawn from the posterior during the inference.
compute_q_logp: flag indicating whether the likelihood of the posterior distribution for the samples drawn during the inference to be returned.
compute_p_logp: flag indicating whether the likelihood of the prior distribution for the samples drawn during the inference to be returned.
Returns:
xi: latent factor sampled from the variational layer.
context_condition: context used by the variational layer that carries information from the previous layers for conditioning the prior and the posterior distribution.
kl_loss: kl divergence between the posterior and the prior.
            q_logp: the likelihood of the posterior distribution for the samples drawn during the inference, returned if compute_q_logp=True.
            p_logp: the likelihood of the prior distribution for the samples drawn during the inference, returned if compute_p_logp=True.
"""
with tf.name_scope(self.name or 'VariationalLayer_infer'):
## process evidence condition of current layer
condition = tf.convert_to_tensor(condition, dtype=self.dtype, name='condition')
## bottom-up/ red path in Figure 2. in [1].
if direction=='bottom-up':
return self._encoder_network(condition,training)
## top-down/ blue path in Figure 2. in [1].
## combine context and latent factor of previous layer, to get the current context
context_condition= self.compute_context(latent_condition=latent_condition, context=context,training=training,num_samples=tf.shape(condition)[0])
prior_params=None
if latent_condition is not None:
# form the prior distribution
if self._prior_network is not None:
self._prior_network.call(condition=context_condition,training=training)
# extract the prior parameters
if self._residual:
prior_params=self._prior_network.params()
# form the condition of the posterior.
# this includes the deterministic features of the bottom-up path and the stochastic context of the generative layer.
condition=self._merge_encoder_network([condition,context_condition])
self._posterior_condition=condition
if not compute_q_logp:
# pass the prior params, if any
xi=self._posterior_network.sample(condition= self._posterior_condition,training=training,num_samples=num_samples,initial_params=prior_params)
q_logp=tf.zeros(shape=(num_samples,tf.shape(condition)[0]))
else:
xi,q_logp=self._posterior_network.sample_and_log_prob(condition= self._posterior_condition,training=training,num_samples=num_samples,initial_params=prior_params)
q_logp=q_logp[-1]
            # compute log-likelihoods of the latent factor, if requested
if compute_p_logp:
if self._prior_network is not None:
p_logp=self._prior_network._p[-1].log_prob(xi)
p_logp=tf.reduce_sum(p_logp,axis=[-1-i for i in range(tensorshape_util.rank( self._prior_network._shape)-1)])
else:
# if there is no prior network, the standard normal is considered
prior = tfp.distributions.MultivariateNormalDiag(loc=np.zeros(self._latent_shape, dtype=np.float32),scale_diag=np.ones(self._latent_shape, dtype=np.float32))
p_logp=tf.reduce_sum(prior.log_prob(xi),axis=[-1-i for i in range(tensorshape_util.rank(self._latent_shape)-1)])
else:
p_logp=tf.zeros(shape=(num_samples,tf.shape(condition)[0]))
xi=tf.reshape(xi,shape=prefer_static.concat([[-1], self._latent_shape], axis=0))
q_logp=tf.reshape(q_logp,shape=(-1,))
p_logp=tf.reshape(p_logp,shape=(-1,))
## compute kl (posterior||prior)
if self._prior_network is not None:
p=self._prior_network._p[0]
q=self._posterior_network._p[-1]
kl_loss=tfp.distributions.kl_divergence(q,p)
else:
# if there is no prior network, the standard normal is considered
prior = tfp.distributions.MultivariateNormalDiag(loc=np.zeros(self._latent_shape, dtype=np.float32),scale_diag=np.ones(self._latent_shape, dtype=np.float32))
kl_loss=kullback_leibler_divergence(_p=self._posterior_network._p[-1],_q=prior,training=training,p_condition= self._posterior_condition,q_condition=None)
kl_loss=tf.reduce_sum(kl_loss,axis=[i for i in range(1,tensorshape_util.rank(self._latent_shape))])
return xi, context_condition, kl_loss, q_logp, p_logp
@property
def p(self):
""" the prior distribution of the variational layer. """
self._prior_network.call(self._prior_condition,training=False)
return self._prior_network.p
@property
def q(self):
""" the posterior distribution of the variational layer. """
self._posterior_network.call(self._posterior_condition,training=False)
return self._posterior_network.p
|
StarcoderdataPython
|
6474211
|
import shoulder
from scapula.filter import filters
from scapula.transform import transforms
from scapula.generator.scapula_generator import ScapulaGenerator
import scapula.writer as writer
class ReadRes1Generator(ScapulaGenerator):
def setup(self, regs):
regs = shoulder.filter.filters["aarch64"].filter_inclusive(regs)
regs = filters["register_has_res1"].filter_inclusive(regs)
regs = transforms["only_res1"].transform(regs)
regs = transforms["unique_res1"].transform(regs)
if self.verbose:
self.gadgets["scapula.gadget.testcase"].verbose = True
return regs
def generate_testcase(self, outfile, reg):
if reg.is_readable():
var1 = writer.declare_variable(outfile, "val", reg.size)
for fs_idx, fs in enumerate(reg.fieldsets):
for f_idx, f in enumerate(fs.fields):
writer.get_field(outfile, reg, f, var1)
writer.if_statement(outfile, var1 + " != 1")
msg = "RES1 field " + reg.name + "." + f.name + " != 1"
writer.print_warning(outfile, msg, indent=1)
writer.endif(outfile)
writer.write_newline(outfile)
|
StarcoderdataPython
|
104145
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# qwiic_as6212.py
#
# Python module for the AS6212 Digital Temperature Sensor Qwiic
#
#------------------------------------------------------------------------
#
# Written by <NAME>, SparkFun Electronics, Aug 2021
#
# Thanks to <NAME> and <NAME> @ SparkFun Electronics
# for code examples from TMP102 Python Package, May 2021
# (https://github.com/sparkfun/Qwiic_TMP102_Py)
#
# Thanks to <NAME>. This library was based off his
# original library created 07/15/2020 and can be found here:
# https://github.com/will2055/AS6212-Arduino-Library/
#
# Thanks to <NAME> @ SparkFun Electronics
# for code examples from TMP117 Arduino Library
# (https://github.com/sparkfun/SparkFun_TMP117_Arduino_Library)
#
# This python library supports the SparkFun Electronics qwiic
# sensor/board ecosystem on a Raspberry Pi (and compatible) single
# board computers.
#
# More information on qwiic is at https://www.sparkfun.com/qwiic
#
# Do you like this library? Help support SparkFun. Buy a board!
#
#==================================================================================
# Copyright (c) 2021 SparkFun Electronics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#==================================================================================
"""
qwiic_as6212
============
Python module for the [SparkFun Digital Temperature Sensor Breakout - AS6212 (Qwiic)](https://www.sparkfun.com/products/18521)
This python package is a port of the existing [SparkFun Qwiic AS6212 Sensor Arduino Library](https://github.com/sparkfun/SparkFun_TMP102_Arduino_Library/tree/master/examples)
This package can be used in conjunction with the overall [SparkFun qwiic Python Package](https://github.com/sparkfun/Qwiic_Py)
New to qwiic? Take a look at the entire [SparkFun qwiic ecosystem](https://www.sparkfun.com/qwiic).
"""
from __future__ import print_function, division
import qwiic_i2c
#======================================================================
# Basic setup of I2C commands and available I2C Addresses
#
#
# The name of this device - note this is private
_DEFAULT_NAME = "Qwiic AS6212 Sensor"
AS6212_DEFAULT_ADDRESS = 0x48
#Internal Register Addresses
TVAL_REG = 0x0 #Temperature Register
CONFIG_REG = 0x1 #Configuration Register
TLOW_REG = 0x2 #Low Temperature Threshold
THIGH_REG = 0x3 #High Temperature Threshold
#Helpful preset definitions for configuration register
DEFAULTM = 0x40A0 #Default state
SLEEPMODE = 0x41A0 #Sleep Mode
SLEEPSS = 0xC1A0 #Sleep Mode Single Shot
# Register values (MSB -> LSB)
SINGLESHOT = 0x8000 #15
CFAULT_1 = 0x0800 #12
CFAULT_0 = 0x0400 #11
POLARITY = 0x0200 #10
INTERRUPT = 0x0100 #9
SLEEP = 0x0080 #8
CONVER_RATE_1 = 0x0040 #7
CONVER_RATE_0 = 0x0020 #6
ALERT = 0x0010 #5
AS6212_RESOLUTION = 0.0078125
AS6212_CONFIG_BIT_ALERT = 5
AS6212_CONFIG_BIT_CONVERSION_RATE_0 = 6
AS6212_CONFIG_BIT_CONVERSION_RATE_1 = 7
AS6212_CONFIG_BIT_SLEEP_MODE = 8
AS6212_CONFIG_BIT_INTERRUPT_MODE = 9
AS6212_CONFIG_BIT_ALERT_POL = 10
AS6212_CONFIG_BIT_CONSECUTIVE_FAULTS_0 = 11
AS6212_CONFIG_BIT_CONSECUTIVE_FAULTS_1 = 12
AS6212_CONFIG_BIT_SINGLE_SHOT = 15
#Address can be set using jumpers on bottom of board (default: 0x48)
_AVAILABLE_I2C_ADDRESS = [0x48, 0x44, 0x45, 0x46, 0x47, 0x49, 0x4A, 0x4B]
###############################################################################
###############################################################################
# Some devices have multiple available addresses - this is a list of these addresses.
# NOTE: The first address in this list is considered the default I2C address for the
# device.
class QwiicAs6212Sensor(object):
"""
QwiicAs6212Sensor
:param address: The I2C address to use for the device.
If not provided, the default address is used.
:param i2c_driver: An existing i2c driver object. If not provided
a driver object is created.
:return: The AS6212 Sensor device object.
:rtype: Object
"""
device_name = _DEFAULT_NAME
available_addresses = _AVAILABLE_I2C_ADDRESS
AS6212_MODE_COMPARATOR = 0
AS6212_MODE_INTERRUPT = 1
AS6212_CONVERSION_CYCLE_TIME_125MS = 3
AS6212_CONVERSION_CYCLE_TIME_250MS = 2
AS6212_CONVERSION_CYCLE_TIME_1000MS = 1
AS6212_CONVERSION_CYCLE_TIME_4000MS = 0
AS6212_ALERT_ACTIVE_HIGH = 1
AS6212_ALERT_ACTIVE_LOW = 0
# Constructor
def __init__(self, address=None, i2c_driver=None):
# Did the user specify an I2C address?
self.address = self.available_addresses[0] if address is None else address
# load the I2C driver if one isn't provided
if i2c_driver is None:
self._i2c = qwiic_i2c.getI2CDriver()
if self._i2c is None:
print("Unable to load I2C driver for this platform.")
return
else:
self._i2c = i2c_driver
# ----------------------------------
# is_connected()
#
# Is an actual board connected to our system?
def is_connected(self):
"""
        Determine if an AS6212 sensor device is connected to the system.
:return: True if the device is connected, otherwise False.
:rtype: bool
"""
return qwiic_i2c.isDeviceConnected(self.address)
connected = property(is_connected)
# ----------------------------------
# begin()
#
# Initialize the system/validate the board.
def begin(self):
"""
        Initialize the operation of the AS6212 sensor module
        :return: Returns True if the initialization was successful, otherwise False.
:rtype: bool
"""
# Set variables
self.tempC = 0
self.tempF = 0
# Basically return True if we are connected...
return self.is_connected()
#****************************************************************************#
#
# Sensor functions
#
# ****************************************************************************#
def get_address(self):
"""
Returns the device address
"""
return self.address
def read_2_byte_register(self, register_to_read):
"""
Reads two bytes of data from a desired register.
Combines them into a single 16 bit value
Returns single value
"""
data = self._i2c.readBlock(self.address, register_to_read, 2)
#Combine bytes to create a single signed int
return ( (data[0] << 8 ) | data[1] )
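    # Example (illustration only): a readBlock result of [0x12, 0x34] is
    # combined into 0x1234 (4660).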
def write_register(self, reg, data):
data_bytes = [0,0]
data_bytes[1] |= (data & 0xFF)
data_bytes[0] |= data >> 8
self._i2c.writeBlock(self.address, reg, data_bytes)
def read_config(self):
return self.read_2_byte_register(CONFIG_REG)
def write_config(self, targetState):
self.write_register(CONFIG_REG, targetState)
def read_temp_c(self):
"""
        Reads the temperature from the sensor in degrees Celsius
        :rtype: float
"""
digitalTempC = self.read_2_byte_register(TVAL_REG)
        if (digitalTempC < 32768):
            finalTempC = digitalTempC * 0.0078125
        if (digitalTempC >= 32768):
            # negative temperature: convert from 16-bit two's complement
            digitalTempC = ~digitalTempC
            digitalTempC &= 0xFFFF
            finalTempC = ((digitalTempC + 1) * 0.0078125) * -1
self.tempC = finalTempC
return self.tempC
def read_temp_f(self):
"""
        Reads the temperature from the sensor in degrees Fahrenheit
        :rtype: float
"""
self.tempF = self.read_temp_c() * 9.0 / 5.0 + 32.0
return self.tempF
def set_alert_polarity(self, polarity):
"""
Set the polarity of Alert
AS6212_ALERT_ACTIVE_HIGH (1)
AS6212_ALERT_ACTIVE_LOW (0)
"""
configReg = self.read_config()
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_ALERT_POL, polarity)
self.write_config(configReg)
def get_alert_polarity(self):
"""
Get the polarity of Alert
AS6212_ALERT_ACTIVE_HIGH (1)
AS6212_ALERT_ACTIVE_LOW (0)
"""
configReg = self.read_config()
return self.bit_read(configReg, AS6212_CONFIG_BIT_ALERT_POL)
def set_interrupt_mode(self, mode):
"""
sets the interrupt mode bits in the config register
valid options are:
AS6212_MODE_COMPARATOR (0)
AS6212_MODE_INTERRUPT (1)
"""
configReg = self.read_config()
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_INTERRUPT_MODE, mode)
self.write_config(configReg)
def get_interrupt_mode(self):
"""
Get the interrupt mode bit
AS6212_MODE_COMPARATOR (0)
AS6212_MODE_INTERRUPT (1)
"""
configReg = self.read_config()
return self.bit_read(configReg, AS6212_CONFIG_BIT_INTERRUPT_MODE)
def get_alert_status(self):
"""
Get the status of the alert bit (0 or 1)
"""
configReg = self.read_config()
return self.bit_read(configReg, AS6212_CONFIG_BIT_ALERT)
def set_consecutive_faults(self, faults):
"""
Set the number of consecutive faults
1 - 1 fault
2 - 2 faults
3 - 3 faults
4 - 4 faults
"""
if (faults > 4) or (faults < 1):
            return float('nan')
faults = faults - 1 # consecutive faults value is stored in just 2 bits in the config reg,
# so we must convert from "human readable" ints 1-4 to stored values (0-3).
configReg = self.read_config()
configBit_11 = self.bit_read(faults, 0)
configBit_12 = self.bit_read(faults, 1)
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_CONSECUTIVE_FAULTS_0, configBit_11)
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_CONSECUTIVE_FAULTS_1, configBit_12)
self.write_config(configReg)
def get_consecutive_faults(self):
"""
Gets the number of consecutive faults that need to happen in a row before alert is changed.
        valid settings are 1, 2, 3 or 4, but these correspond to different bit values
in the configuration register bits 11 and 12
"""
configReg = self.read_config()
consecutive_faults_bit_0 = self.bit_read(configReg, AS6212_CONFIG_BIT_CONSECUTIVE_FAULTS_0)
consecutive_faults_bit_1 = self.bit_read(configReg, AS6212_CONFIG_BIT_CONSECUTIVE_FAULTS_1)
faults = 0
faults = self.bit_write(faults, 0, consecutive_faults_bit_0)
faults = self.bit_write(faults, 1, consecutive_faults_bit_1)
        faults = ( faults + 1 ) # consecutive faults is stored in just two bits in the config reg,
        # so we add 1 to convert the stored values (0-3) back to human readable values (1-4)
return faults
def set_conversion_cycletime(self, cycletime):
"""
        sets the conversion cycle time (aka conversion rate) in the config register
valid settings are:
AS6212_CONVERSION_CYCLE_TIME_125MS
AS6212_CONVERSION_CYCLE_TIME_250MS
AS6212_CONVERSION_CYCLE_TIME_1000MS
AS6212_CONVERSION_CYCLE_TIME_4000MS
"""
#discard out of range values
if cycletime > 3 or cycletime < 0:
            return float('nan')
configReg = self.read_config()
configBit_6 = self.bit_read(cycletime, 0)
configBit_7 = self.bit_read(cycletime, 1)
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_CONVERSION_RATE_0, configBit_6)
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_CONVERSION_RATE_1, configBit_7)
self.write_config(configReg)
def get_conversion_cycletime(self):
"""
        Gets the conversion cycle time (aka conversion rate) from the config register
Returns the cycle time in milliseconds: (125/250/1000/4000)
"""
configReg = self.read_config()
conversion_rate_bit_0 = self.bit_read(configReg, AS6212_CONFIG_BIT_CONVERSION_RATE_0)
conversion_rate_bit_1 = self.bit_read(configReg, AS6212_CONFIG_BIT_CONVERSION_RATE_1)
cycletime_val = 0
cycletime_val = self.bit_write(cycletime_val, 0, conversion_rate_bit_0)
        cycletime_val = self.bit_write(cycletime_val, 1, conversion_rate_bit_1)
if cycletime_val == AS6212_CONVERSION_CYCLE_TIME_125MS:
return 125
if cycletime_val == AS6212_CONVERSION_CYCLE_TIME_250MS:
return 250
if cycletime_val == AS6212_CONVERSION_CYCLE_TIME_1000MS:
return 1000
if cycletime_val == AS6212_CONVERSION_CYCLE_TIME_4000MS:
return 4000
def set_sleep_mode(self, mode):
"""
sets the sleep mode bit (on or off) in the config register
valid options are:
0 = SLEEP MODE OFF
1 = SLEEP MODE ON
"""
configReg = self.read_config()
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_SLEEP_MODE, mode)
if mode == 1: # as recommended in datasheet section 6.2.4
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_SINGLE_SHOT, 1)
self.write_config(configReg)
def get_sleep_mode(self):
"""
gets the status of the sleep mode bit from the config register
"""
configReg = self.read_config()
return self.bit_read(configReg, AS6212_CONFIG_BIT_SLEEP_MODE)
def trigger_single_shot_conversion(self):
"""
Sets the SS mode bit in the config register
Note, you must be in sleep mode for this to work
"""
configReg = self.read_config()
if self.bit_read(configReg, AS6212_CONFIG_BIT_SLEEP_MODE) == 1:
configReg = self.bit_write(configReg, AS6212_CONFIG_BIT_SINGLE_SHOT, 1)
self.write_config(configReg)
def get_single_shot_status(self):
"""
gets the status of the single shot bit from the config register
0 = No conversion ongoing / conversion finished
1 = Start single shot conversion / conversion ongoing
"""
configReg = self.read_config()
return self.bit_read(configReg, AS6212_CONFIG_BIT_SINGLE_SHOT)
def set_low_temp_c(self, temperature):
"""
Sets T_LOW (degrees C) alert threshold
"""
if temperature >= 0: # positive number or zero
low_temp = int(temperature / 0.0078125)
if temperature < 0: #negative number
temperature /= 0.0078125
temp_int = int(temperature)
temp_int &= 0xFFFF
low_temp = ( ~(temp_int) + 1 ) * -1
self.write_register(TLOW_REG, low_temp)
def set_high_temp_c(self, temperature):
"""
Sets THIGH (degrees C) alert threshold
"""
if temperature >= 0: # positive number or zero
high_temp = int(temperature / 0.0078125)
if temperature < 0: #negative number
temperature /= 0.0078125
temp_int = int(temperature)
temp_int &= 0xFFFF
high_temp = ( ~(temp_int) + 1 ) * -1
self.write_register(THIGH_REG, high_temp)
def set_low_temp_f(self, temperature):
"""
Sets T_LOW (degrees F) alert threshold
"""
new_temp = (temperature - 32)*5/9 # Convert temperature to C
self.set_low_temp_c(new_temp) # Set T_LOW
def set_high_temp_f(self, temperature):
"""
Sets T_HIGH (degrees F) alert threshold
"""
new_temp = (temperature - 32)*5/9 # Convert temperature to C
self.set_high_temp_c(new_temp) # Set T_HIGH
def read_low_temp_c(self):
"""
Gets T_LOW (degrees C) alert threshold
"""
digital_temp = self.read_2_byte_register(TLOW_REG)
if (digital_temp < 32768):
finalTempC = float(digital_temp) * 0.0078125
if (digital_temp >= 32768):
digital_temp = ~digital_temp
digital_temp &= 0xFFFF
finalTempC = (( digital_temp + 1 ) * 0.0078125) * -1
return finalTempC
def read_high_temp_c(self):
"""
Gets T_HIGH (degrees C) alert threshold
"""
digital_temp = self.read_2_byte_register(THIGH_REG)
if (digital_temp < 32768):
finalTempC = digital_temp * 0.0078125
if (digital_temp >= 32768):
digital_temp = ~digital_temp
digital_temp &= 0xFFFF
finalTempC = (( digital_temp + 1 ) * 0.0078125) * -1
return finalTempC
def read_low_temp_f(self):
"""
Reads T_LOW register in F
"""
return self.read_low_temp_c()*9.0/5.0 + 32.0
def read_high_temp_f(self):
"""
Reads T_HIGH register in F
"""
return self.read_high_temp_c()*9.0/5.0 + 32.0
def bit_read(self, value, bit):
return (((value) >> (bit)) & 0x01)
def bit_set(self, value, bit):
return ((value) | (1 << (bit)))
def bit_clear(self, value, bit):
return ((value) & ~(1 << (bit)))
def bit_write(self, value, bit, bitvalue):
if (bitvalue == 1):
return self.bit_set(value, bit)
if (bitvalue == 0):
return self.bit_clear(value, bit)
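    # Example (illustration only): bit_write(0b0000, 2, 1) returns 0b0100, and
    # bit_read(0b0100, 2) returns 1.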
def print_config_binary(self):
configReg = self.read_config()
print("\nConfig: ")
configBin = ""
for i in range(15, -1, -1):
configBin += str(self.bit_read(configReg, i))
if i == 8:
configBin += " "
print(configBin)
def print_binary(self, data):
configReg = self.read_config()
print("\ndata: ")
dataBin = ""
for i in range(15, -1, -1):
dataBin += str(self.bit_read(data, i))
if i == 8:
dataBin += " "
print(dataBin)
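# ----------------------------------------------------------------------------
# Minimal usage sketch (illustration only; assumes an AS6212 breakout wired at
# the default address 0x48 and the qwiic_i2c driver available):
#
#   import time
#   sensor = QwiicAs6212Sensor()
#   if sensor.begin():
#       sensor.set_conversion_cycletime(sensor.AS6212_CONVERSION_CYCLE_TIME_250MS)
#       while True:
#           print("Temperature: %.4f C" % sensor.read_temp_c())
#           time.sleep(1)
# ----------------------------------------------------------------------------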
|
StarcoderdataPython
|
114080
|
from guillotina.interfaces import IAddOn
from zope.interface import implementer
@implementer(IAddOn)
class Addon(object):
""" Prototype of an Addon plugin
"""
@classmethod
def install(cls, container, request):
pass
@classmethod
def uninstall(cls, container, request):
pass
|
StarcoderdataPython
|
1996150
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
import pytest
from pyomo.environ import ConcreteModel, Var, Constraint, Block, SolverFactory
from idaes.core import FlowsheetBlock
from idaes.core.util import get_solver
import idaes.logger as idaeslog
from watertap.util.initialization import *
__author__ = "<NAME>"
_log = idaeslog.getLogger(__name__)
# Set up solver
solver = get_solver()
class TestCheckDOF:
@pytest.fixture(scope="class")
def m(self):
m = ConcreteModel()
m.a = Var()
m.b = Var()
m.abcon = Constraint(rule=m.a + m.b == 10)
return m
@pytest.mark.unit
def test_expected(self, m):
check_dof(m, fail_flag=False, expected_dof=1)
check_dof(m, fail_flag=True, expected_dof=1)
assert_degrees_of_freedom(m, 1)
@pytest.mark.unit
def test_more_expected(self, m):
check_dof(m, fail_flag=False, expected_dof=3)
msg = r"Unexpected degrees of freedom: Degrees of freedom on unknown = 1. Expected 3. Unfix 2 variable\(s\)"
with pytest.raises(ValueError, match=msg):
check_dof(m, fail_flag=True, expected_dof=3)
with pytest.raises(ValueError, match=msg):
assert_degrees_of_freedom(m, 3)
@pytest.mark.unit
def test_less_expected(self, m):
check_dof(m, fail_flag=False, expected_dof=-1)
msg = r"Unexpected degrees of freedom: Degrees of freedom on unknown = 1. Expected -1. Fix 2 variable\(s\)"
with pytest.raises(ValueError, match=msg):
check_dof(m, fail_flag=True, expected_dof=-1)
with pytest.raises(ValueError, match=msg):
assert_degrees_of_freedom(m, -1)
@pytest.mark.unit
def test_zero_expected(self, m):
# check_dof should pass since fail_flag=False produces warning for DOF!=0
check_dof(m, fail_flag=False)
msg = r"Non-zero degrees of freedom: Degrees of freedom on unknown = 1. Fix 1 more variable\(s\)"
# Verify error is raised since DOF!=0
with pytest.raises(ValueError, match=msg):
check_dof(m, fail_flag=True)
with pytest.raises(ValueError, match=msg):
assert_no_degrees_of_freedom(m)
@pytest.mark.unit
def test_zero(self, m):
m.a.fix(5)
# check should pass since DOF=0
check_dof(m, fail_flag=True)
check_dof(m, fail_flag=True, expected_dof=0)
assert_no_degrees_of_freedom(m)
m.a.unfix()
class TestCheckSolve:
@pytest.fixture(scope="class")
def m(self):
m = ConcreteModel()
m.a = Var()
m.acon = Constraint(rule=m.a >= 10)
m.bcon = Constraint(rule=m.a == 5)
return m
@pytest.mark.unit
def test_failure(self, m):
results = solver.solve(m)
# check_solve should pass since fail_flag=False and only warning will be produced
check_solve(results, logger=_log, fail_flag=False)
# expect the solve to fail and raise error
with pytest.raises(ValueError, match="The solver failed to converge to an optimal solution. This suggests that the "
"user provided infeasible inputs or that the model is poorly scaled."):
check_solve(results, logger=_log, fail_flag=True)
@pytest.mark.unit
def test_failure_checkpoint(self, m):
results = solver.solve(m)
# check_solve should pass since fail_flag=False and only warning will be produced
check_solve(results, checkpoint='test', logger=_log, fail_flag=False)
# expect the solve to fail and raise error
with pytest.raises(ValueError, match="test failed. The solver failed to converge to an optimal solution. "
"This suggests that the user provided infeasible inputs or that the model is poorly scaled."):
check_solve(results, checkpoint='test', logger=_log, fail_flag=True)
@pytest.mark.unit
def test_success(self, m):
m.acon.deactivate()
results = solver.solve(m)
# both check_solve's should pass
check_solve(results, logger=_log, fail_flag=False)
check_solve(results, logger=_log, fail_flag=True)
m.acon.activate()
class TestPerturbationHelper:
@pytest.fixture(scope="class")
def b(self):
b = Block(concrete=True)
b.x = Var(bounds=(1e-8, None), initialize=1e-7)
b.y = Var(bounds=(1e+1, 1e+2), initialize=1e+3)
b.z = Var(bounds=(0., 1e-8), initialize=1e-20)
b.z.fix()
b.w = Var(bounds=(None, 1), initialize=0.5)
return b
@pytest.mark.unit
def test_generate_initialization_perturbation(self, b):
r = list(generate_initialization_perturbation(b))
assert r[0][0].name == 'x'
assert r[1][0].name == 'y'
assert r[0][1] == 1e-7
assert r[1][1] == 1e+3
assert r[0][2] == 1e-2
assert r[1][2] == 99.100000989
r = list(generate_initialization_perturbation(b, bound_relax_factor=0.))
assert r[0][0].name == 'x'
assert r[1][0].name == 'y'
assert r[0][2] == 1.000001e-2
assert r[1][2] == 99.1
r = list(generate_initialization_perturbation(b, bound_relax_factor=0., bound_frac=1e-3))
assert r[0][0].name == 'x'
assert r[1][0].name == 'y'
assert r[0][2] == 1.000001e-2
assert r[1][2] == 99.91
r = list(generate_initialization_perturbation(b, bound_relax_factor=0., bound_frac=1e-3, bound_push=1e-3))
assert r[0][0].name == 'x'
assert r[1][0].name == 'y'
assert r[0][2] == 1.00001e-3
assert r[1][2] == 99.91
r = list(generate_initialization_perturbation(b, bound_relax_factor=0., bound_frac=1e-3, bound_push=1e-10))
assert r[0][0].name == 'y'
assert r[0][2] == 100.-1e-8
r = list(generate_initialization_perturbation(b, bound_push=1e-6))
assert r[0][0].name == 'x'
assert r[1][0].name == 'y'
assert r[0][2] == 1.e-6
assert r[1][2] == 99.999900999999
@pytest.mark.unit
def test_print_initialization_perturbation(self, b, capsys):
print_initialization_perturbation(b, 1e-2, 1e-2, 1e-8, True)
captured = capsys.readouterr()
assert captured.out == \
"""IPOPT will move scaled initial value for variable x from 1.000000e-07 to 1.000000e-02
IPOPT will move scaled initial value for variable y from 1.000000e+03 to 9.910000e+01
"""
@pytest.mark.unit
def test_assert_no_initialization_perturbation1(self, b):
with pytest.raises(ValueError, match=\
"IPOPT will move scaled initial value for variable x from 1.000000e-07 to 1.000000e-02"):
assert_no_initialization_perturbation(b)
@pytest.mark.unit
def test_assert_no_initialization_perturbation2(self, b):
optarg = {'bound_push' : 1e-10}
b.y.value = 5e+1
assert_no_initialization_perturbation(b, optarg=optarg)
@pytest.mark.unit
def test_assert_no_initialization_perturbation3(self, b):
solver = get_solver()
solver.options['bound_push'] = 1e-10
b.y.value = 5e+1
assert_no_initialization_perturbation(b, solver=solver)
@pytest.mark.unit
def test_assert_no_initialization_perturbation4(self, b):
with pytest.raises(ValueError, match="Supply a solver or optarg, not both"):
assert_no_initialization_perturbation(b, solver=b, optarg=b)
@pytest.mark.unit
def test_assert_no_initialization_perturbation5(self, b):
with pytest.raises(ValueError, match="Solver cbc is not supported"):
assert_no_initialization_perturbation(b, solver=SolverFactory('cbc'))
|
StarcoderdataPython
|
9768276
|
import subprocess
import sys
import json
import platform
import os
from crmetrics import CRBase
class CRLogs(CRBase):
def _get_container_logs(self, pod, namespace, containers, kubeconfig):
for c in containers:
container = c['name']
cmd = 'kubectl logs ' + pod + ' -n ' + namespace + ' -c ' + container + ' ' + kubeconfig
#print(cmd)
print("======== Pod::" + pod + "/container::" + container + " ===========")
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
if out:
print(out)
except Exception as e:
print(e)
def get_logs(self, pod, namespace, kubeconfig):
cmd = 'kubectl get pods ' + pod + ' -n ' + namespace + ' -o json ' + kubeconfig
#print(cmd)
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
if out:
json_output = json.loads(out)
containers = json_output['spec']['containers']
self._get_container_logs(pod, namespace, containers, kubeconfig)
if 'initContainers' in json_output['spec']:
init_containers = json_output['spec']['initContainers']
self._get_container_logs(pod, namespace, init_containers, kubeconfig)
except Exception as e:
print(e)
def get_resources_composition(self, kind, instance, namespace, kubeconfig):
platf = platform.system()
kubeplus_home = os.getenv('KUBEPLUS_HOME')
cmd = ''
json_output = {}
if platf == "Darwin":
cmd = kubeplus_home + '/plugins/kubediscovery-macos composition '
elif platf == "Linux":
cmd = kubeplus_home + '/plugins/kubediscovery-linux composition '
else:
print("OS not supported:" + platf)
return json_output
cmd = cmd + kind + ' ' + instance + ' ' + namespace + ' ' + kubeconfig
#print(cmd)
out = ''
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
out = out.decode('utf-8')
except Exception as e:
print(e)
if out:
print(out)
try:
json_output = json.loads(out)
except Exception as e:
print(e)
return json_output
def get_pods(self, resources):
pod_list = []
for resource in resources:
#print(resource)
if resource['Kind'] == 'Pod':
present = False
for p in pod_list:
if p['Name'] == resource['Name']:
present = True
break
if not present:
pod_list.append(resource)
#print(pod_list)
return pod_list
if __name__ == '__main__':
crLogs = CRLogs()
#crLogs.get_logs(sys.argv[1], sys.argv[2])
#resources = sys.argv[1]
relation = sys.argv[1]
kind = sys.argv[2]
instance = sys.argv[3]
namespace = sys.argv[4]
kubeconfig = sys.argv[5]
#print(kind + " " + instance + " " + namespace + " " + kubeconfig)
resources = {}
if relation == 'connections':
resources = crLogs.get_resources_connections(kind, instance, namespace, kubeconfig)
#print(resources)
if relation == 'composition':
resources = crLogs.get_resources_composition(kind, instance, namespace, kubeconfig)
#print(resources)
#resource_json = json.loads(resources)
pods = crLogs.get_pods(resources)
for pod in pods:
pod_name = pod['Name']
pod_namespace = pod['Namespace']
#print(pod_name)
crLogs.get_logs(pod_name, pod_namespace, kubeconfig)
print("---------------------------------------")
|
StarcoderdataPython
|
6526584
|
<reponame>ikeikeikeike/cachers
import unittest
from cachers import FIFOCache
from . import CacheTestMixin
class FIFOCacheTest(unittest.TestCase, CacheTestMixin):
Cache = FIFOCache
def test_fifo(self):
cache = FIFOCache(maxsize=2)
cache[1] = 1
cache[2] = 2
cache[3] = 3
self.assertEqual(len(cache), 2)
self.assertEqual(cache[2], 2)
self.assertEqual(cache[3], 3)
self.assertNotIn(1, cache)
cache[2]
cache[4] = 4
self.assertEqual(len(cache), 2)
self.assertEqual(cache[3], 3)
self.assertEqual(cache[4], 4)
self.assertNotIn(2, cache)
cache[5] = 5
self.assertEqual(len(cache), 2)
self.assertEqual(cache[4], 4)
self.assertEqual(cache[5], 5)
self.assertNotIn(3, cache)
def test_fifo_order(self):
cache = FIFOCache(maxsize=100)
for i in range(0, 100):
cache[i] = i
for i in range(0, 100):
self.assertEqual(cache[i], i)
for i in range(200, 250):
cache[i] = i
for i in range(50, 100):
self.assertEqual(cache[i], i)
for i in range(200, 250):
self.assertEqual(cache[i], i)
for left, (right, _) in zip(list(range(50, 100)) + list(range(200, 250)), cache.data):
self.assertEqual(left, right)
|
StarcoderdataPython
|
4822215
|
# Subject/Participant
sub='sub-01'
# Total number of experimental runs
total_run=8
# Left-out run for testing
test_run=1
# Predictor ROI
roi_1_name='FFA'
# Target ROI
roi_2_name='GM'
# Functional Data
filepath_func=[]
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run1.nii.gz']
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run2.nii.gz']
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run3.nii.gz']
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run4.nii.gz']
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run5.nii.gz']
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run6.nii.gz']
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run7.nii.gz']
filepath_func+=['./example_data/'+sub+'/'+sub+'_movie_bold_space-MNI152NLin2009cAsym_preproc_denoised_run8.nii.gz']
# Predictor ROI Mask
filepath_mask1='./example_data/'+sub+'/'+sub+'_FFA_80vox_bin.nii.gz'
# Target ROI Mask
filepath_mask2='./example_data/GM_thr0.1_bin.nii.gz'
# Output Directory
roidata_save_dir='./example_data/roi_data/'
results_save_dir='./test/'
# MVPD Model
model_type='NN_1layer' # ['PCA_LR', 'L2_LR', 'NN_1layer', 'NN_5layer', 'NN_5layer_dense']
# PCA + Linear Regression
num_pc=3 # number of principal component used
# L2 Regularized Linear Regression
alpha=0.01 # regularization strength
# Neural Network
input_size=80
output_size=53539
hidden_size=100 # number of units per hidden layer
num_epochs=5000 # number of epochs for training
save_freq=1000 # checkpoint saving frequency
print_freq=100 # print out frequency
batch_size=32
learning_rate=1e-3
momentum_factor=0.9
w_decay=0 # weight decay (L2 penalty)
|
StarcoderdataPython
|
3472117
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets
from .serializers import programSerializer
from .models import program
class programView(viewsets.ModelViewSet):
serializer_class = programSerializer
queryset = program.objects.all()
|
StarcoderdataPython
|
1972606
|
import turtle
import math
# wasn't sure if we were allowed to use turtle.circle() so i've done it manually
# turtle configs
t = turtle.Turtle()
t.pencolor("#545454")
t.speed(0)
# helper function to move the turtle to a coordinate without drawing a line to it
def moveto(x, y):
t.penup()
t.setposition(x, y)
t.pendown()
# helper function to draw curves with angles = angle, of radius = radius
def drawCircle(angle, radius):
for i in range(angle):
t.forward(((2 * math.pi * radius) / 360))
t.left(1)
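# For example, drawCircle(360, 50) traces a full circle of radius 50 in
# one-degree steps, while drawCircle(180, 50) traces a semicircle.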
# draws a square with sides of length side, centred around (x, y)
# could've done this a little simpler but felt cleaner to have the square centred
def drawSquare(x, y, side):
moveto(x, y)
for i in range(8):
t.forward(side / 2)
t.left(90 if i % 2 else 0)
moveto(x, y)
# tilts the turtle, sets the side using the pythagoras theorem and delegates to drawSquare()
def drawTiltedSquare(x, y, side):
moveto(x, y)
t.left(45)
diagonal_side = math.sqrt(2 * (side * side)) / 2
drawSquare(x, y, diagonal_side)
moveto(x, y)
# calls drawSquare() and drawTiltedSquare() in sequence
def squareInSquare(x, y, side):
t.setheading(0)
drawSquare(x, y, side)
drawTiltedSquare(x + (side / 2), y, side)
# for every iteration, double the side length and call squareInSquare()
def fractal(x, y, startSide, k):
moveto(x, y)
for i in range(
1, k + 1
): # starting with 1 to avoid multiplying with 0 on the first run
t.setheading(0)
startSide *= 2
squareInSquare(x - (startSide / 2), y - (startSide / 2), startSide)
# ^ the -startSide/2 just centres the squares around the centre
# instead of the bottom right corner
# could've done this recursively instead of iteratively but the loop felt better
"""if k:
t.setheading(0)
squareInSquare(x - (startSide / 2), y - (startSide / 2), startSide)
startSide *= 2
fractal(x, y, startSide, k - 1)"""
# calls fractal(), then calls drawCircle() to draw
# semicircles with increasing startLength as radius
def spiralOut(x, y, startLength, k):
fractal(x, y, startLength, k)
moveto(x, y)
for i in range(k * 2):
drawCircle(180, startLength * (i * 2))
# calls fractal(), then calls drawCircle() to draw
# semicircles with increasing startRadius as radius,
# in fibonacci ratio
def drawFibonacci(x, y, startRadius, loops):
fractal(x, y, startRadius, loops)
moveto(x, y)
i, j = startRadius, startRadius
for loop in range(loops):
drawCircle(180, (10 * i)) # the question said semicircle so 180,
# for the actual golden spiral set the first parameter to 90
i, j = i + j, i
print(f"{i}, {j}")
if __name__ == "__main__":
# drawSquare(0, 0, 50) # part (a)
# drawTiltedSquare(0, 0, 50) # part (b)
# squareInSquare(0, 0, 80) # part (c)
# fractal(0, 0, 10, 10) # part (d)
# spiralOut(0, 0, 20, 10) # part (e)
drawFibonacci(0, 0, 2, 10) # extra credit
turtle.done()
|
StarcoderdataPython
|
11268797
|
"""
Compute Pi on a cluster.
It is not recommended to run this application from the IDE.
@author rambabu.posa
"""
import time
from pyspark.sql import SparkSession
from random import random
from operator import add
slices = 10
numberOfThrows = 100000 * slices
print("About to throw {} darts, ready? Stay away from the target!".format(numberOfThrows))
t0 = int(round(time.time() * 1000))
spark = SparkSession.builder.appName("PySparkPi on a cluster (via spark-submit)") \
.master("local[*]").getOrCreate()
#.config("spark.executor.memory", "4g") #no master
t1 = int(round(time.time() * 1000))
print("Session initialized in {} ms".format(t1 - t0))
numList = []
for x in range(numberOfThrows):
numList.append(x)
incrementalRDD = spark.sparkContext.parallelize(numList)
t2 = int(round(time.time() * 1000))
print("Initial dataframe built in {} ms".format(t2 - t1))
def throwDarts(_):
x = random() * 2 - 1
y = random() * 2 - 1
return 1 if x ** 2 + y ** 2 <= 1 else 0
dartsRDD = incrementalRDD.map(throwDarts)
t3 = int(round(time.time() * 1000))
print("Throwing darts done in {} ms".format(t3 - t2))
dartsInCircle = dartsRDD.reduce(add)
t4 = int(round(time.time() * 1000))
print("Analyzing result in {} ms".format(t4 - t3))
print("Pi is roughly {}".format(4.0 * dartsInCircle/numberOfThrows))
spark.stop()
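# Typical launch via spark-submit (a sketch; the script name is a placeholder):
#   spark-submit pi_on_cluster.py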
|
StarcoderdataPython
|
4918962
|
import os, copy
from flask_api import status
from models.main import *
from models.appendix import *
from models.segment import *
from models.prescription import *
from models.notes import ClinicalNotes
from flask import Blueprint, request
from flask_jwt_extended import (create_access_token, create_refresh_token,
jwt_required, get_jwt_identity)
from .utils import *
from datetime import datetime
app_pat = Blueprint('app_pat',__name__)
def historyExam(typeExam, examsList, segExam):
results = []
for e in examsList:
if e.typeExam == typeExam:
item = formatExam(e, e.typeExam.lower(), segExam)
del(item['ref'])
results.append(item)
return results
def historyCalc(typeExam, examsList, patient):
results = []
for e in examsList:
item = {}
if typeExam == 'mdrd':
item = mdrd_calc(e['value'], patient.birthdate, patient.gender, patient.skinColor)
elif typeExam == 'cg':
item = cg_calc(e['value'], patient.birthdate, patient.gender, patient.weight)
elif typeExam == 'ckd':
item = ckd_calc(e['value'], patient.birthdate, patient.gender, patient.skinColor)
elif typeExam == 'swrtz2':
item = schwartz2_calc(e['value'], patient.height)
item['date'] = e['date']
results.append(item)
return results
@app_pat.route("/exams/<int:admissionNumber>", methods=['GET'])
@jwt_required()
def getExamsbyAdmission(admissionNumber):
user = User.find(get_jwt_identity())
dbSession.setSchema(user.schema)
idSegment = request.args.get('idSegment', 1)
patient = Patient.findByAdmission(admissionNumber)
if (patient is None):
return { 'status': 'error', 'message': 'Paciente Inexistente!' }, status.HTTP_400_BAD_REQUEST
examsList = Exams.findByPatient(patient.idPatient)
segExam = SegmentExam.refDict(idSegment)
perc = {
'h_conleuc': {
'total' : 1,
'relation': ['h_conlinfoc', 'h_conmono', 'h_coneos', 'h_conbaso', 'h_consegm']
}
}
bufferList = {}
typeExams = []
for e in examsList:
if not e.typeExam.lower() in typeExams and e.typeExam.lower() in segExam:
key = e.typeExam.lower()
item = formatExam(e, key, segExam)
item['name'] = segExam[key].name
item['perc'] = None
item['history'] = historyExam(e.typeExam, examsList, segExam)
item['text'] = False
bufferList[key] = item
typeExams.append(key)
if key in perc:
perc[key]['total'] = float(e.value)
if segExam[key].initials.lower() == 'creatinina':
for keyCalc in ['mdrd','ckd','cg','swrtz2']:
if keyCalc in segExam and patient:
if keyCalc == 'mdrd':
itemCalc = mdrd_calc(e.value, patient.birthdate, patient.gender, patient.skinColor)
elif keyCalc == 'cg':
itemCalc = cg_calc(e.value, patient.birthdate, patient.gender, patient.weight)
elif keyCalc == 'ckd':
itemCalc = ckd_calc(e.value, patient.birthdate, patient.gender, patient.skinColor)
elif keyCalc == 'swrtz2':
itemCalc = schwartz2_calc(e.value, patient.height)
if itemCalc['value']:
itemCalc['name'] = segExam[keyCalc].name
itemCalc['perc'] = None
itemCalc['date'] = item['date']
itemCalc['history'] = historyCalc(keyCalc, item['history'], patient)
bufferList[keyCalc] = itemCalc
for p in perc:
total = perc[p]['total']
for r in perc[p]['relation']:
if r in bufferList:
val = bufferList[r]['value']
bufferList[r]['perc'] = round((val*100)/total,1)
results = copy.deepcopy(segExam)
for e in segExam:
if e in bufferList:
results[e] = bufferList[e]
else:
del(results[e])
examsText = ClinicalNotes.getExamsIfExists(admissionNumber)
resultsText = {}
for e in examsText:
slugExam = slugify(e.prescriber)
if not slugExam in resultsText.keys():
resultsText[slugExam] = {
'name': e.prescriber,
'text': True,
'date': e.date.isoformat(),
'ref': e.text[:20],
'history': []
}
item = {}
item['date'] = e.date.isoformat()
item['value'] = e.text
resultsText[slugExam]['history'].append(item)
resultsText[slugExam]['date'] = e.date.isoformat()
return {
'status': 'success',
'data': dict(results, **resultsText)
}, status.HTTP_200_OK
@app_pat.route('/patient/<int:admissionNumber>', methods=['POST'])
@jwt_required()
def setPatientData(admissionNumber):
user = User.find(get_jwt_identity())
dbSession.setSchema(user.schema)
data = request.get_json()
os.environ['TZ'] = 'America/Sao_Paulo'
p = Patient.findByAdmission(admissionNumber)
if (p is None):
return { 'status': 'error', 'message': 'Paciente Inexistente!' }, status.HTTP_400_BAD_REQUEST
updateWeight = False
if 'weight' in data.keys():
weight = data.get('weight', None)
if weight != p.weight:
p.weightDate = datetime.today()
p.weight = weight
p.user = user.id
updateWeight = True
alertExpire = data.get('alertExpire', None)
if alertExpire and alertExpire != p.alertExpire:
p.alert = data.get('alert', None)
p.alertExpire = alertExpire
p.alertDate = datetime.today()
p.alertBy = user.id
if 'height' in data.keys(): p.height = data.get('height', None)
if 'observation' in data.keys(): p.observation = data.get('observation', None)
p.update = datetime.today()
if 'idPrescription' in data.keys() and updateWeight:
idPrescription = data.get('idPrescription')
query = "INSERT INTO " + user.schema + ".presmed \
SELECT *\
FROM " + user.schema + ".presmed\
WHERE fkprescricao = " + str(int(idPrescription)) + ";"
db.engine.execute(query)
return tryCommit(db, admissionNumber, user.permission())
|
StarcoderdataPython
|
1726926
|
<reponame>ShahabBakht/brain-scorer
import sys
sys.path.append('../')
from loaders import mt2
import numpy as np
import tempfile
import time
import unittest
import torch
from pprint import pprint
class TestMt2Loader(unittest.TestCase):
def test_train(self):
loader = mt2.MT2('../data_derived/crcns-mt2',
nt=32,
nx=64,
ny=64,
split='train',
)
self.assertEqual(len({x['cellnum']: 1 for x in loader.sequence}),
loader.total_electrodes)
self.assertEqual(len({x['cellid']: 1 for x in loader.sequence}),
loader.total_electrodes)
X, m, W, y = loader[0]
self.assertEqual(X.shape[3], 64)
self.assertEqual(X.shape[1], loader.nt + loader.ntau - 1)
self.assertEqual(m.shape, W.shape)
self.assertEqual(y.ndim, 2)
self.assertEqual(y.shape[1], 32)
def test_traintune(self):
loader = mt2.MT2('../data_derived/crcns-mt2',
nt=32,
nx=64,
ny=64,
split='traintune',
)
loader[0]
def test_tune(self):
_ = mt2.MT2('../data_derived/crcns-mt2', nt=32, split='tune')
def test_report(self):
_ = mt2.MT2('../data_derived/crcns-mt2', nt=32, split='report')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3301735
|
<filename>tools/bdf.py
"""
weegfx/tools/bdf.py: BDF font format parsing
Copyright (c) 2019 <NAME> <<EMAIL>>
Released under the 3-clause BSD license (see LICENSE)
"""
import os
import sys
import re
from typing import TextIO, List, Iterable, Any, Callable
from font import row_width, BBox
def bdf_width(width: int) -> int:
"""Calculates the width in bits of each row in the BDF from the actual witdth of a character in pixels."""
# NOTE: Lines in BDF BITMAPs are always stored in multiples of 8 bits
# (https://stackoverflow.com/a/37944252)
return -((-width) // 8) * 8
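# For example: bdf_width(1) == 8, bdf_width(8) == 8, bdf_width(10) == 16 -- each
# bitmap row is padded up to the next multiple of 8 bits.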
def nextline(stream: TextIO) -> str:
"""Returns the next non-whitespace line in `stream` (stripped) or an empty line on EOF."""
line = stream.readline()
while line.isspace() or line.startswith('COMMENT'):
line = stream.readline()
return line.strip()
class BdfBitmap:
def __init__(self, width: int, height: int, data: Iterable[int] = []):
self.data = list(data)
self.width = int(width)
self.bdf_width = row_width(self.width)
self.height = int(height)
@staticmethod
def parse_from(stream: TextIO, width: int, height: int, first_line: str = None) -> 'BdfBitmap':
"""Parses a BDF bitmap from the given stream, given its expected width and height(in pixels).
If `first_line` is present, uses it instead of fetching a first line from the stream.
"""
first_line = first_line if first_line is not None else nextline(stream)
if not first_line.startswith('BITMAP'):
raise SyntaxError(f'Expected BITMAP but found {first_line}')
bitmap = BdfBitmap(width, height)
bits_read = 0
bits_to_read = bitmap.bdf_width * bitmap.height
while bits_read < bits_to_read:
line_hex = ''.join(ch for ch in nextline(
stream) if not ch.isspace())
for col in range(0, len(line_hex), 2):
bitmap.data.append(int(line_hex[col:col+2], 16))
bits_read += 8
return bitmap
def __repr__(self) -> str:
return f'BdfBitmap({self.width}x{self.height})'
class BdfProperty(tuple):
"""The value of a BDF property as a tuple of ints/strings."""
VALUE_RE = re.compile(
r'\s*(?:(?P<int>-?\d+)|(?P<quoted>"[^"]*")|(?P<unquoted>.+))')
def __new__(cls, values_str: str):
"""Inits a BDF property value given the string representing it in the BDF file.
Value(s) are parsed as either ints or strings."""
return tuple.__new__(cls, cls._parse_value(values_str))
@classmethod
def _parse_value(cls, value_str: str) -> Any:
values = []
for match in cls.VALUE_RE.finditer(value_str):
if not match:
raise SyntaxError(
f'Expected int or string value, got {repr(value_str)}')
integer, quoted, unquoted = match.groups()
if integer is not None:
value = int(integer)
elif quoted is not None:
value = quoted[1:-1]
else:
value = unquoted
values.append(value)
return tuple(values)
class BdfRecord:
def __init__(self, type: str, args: Iterable):
self.type = str(type)
"""The type of record(FONT, CHAR...)"""
self.args = list(args)
"""The arguments present after the START < type > directive"""
self.items = {}
"""A mapping of `field -> <BdfProperty(field, args...) or BdfBitmap where field = 'BITMAP' > ."""
self.children = []
"""A list of `BdfRecord`s that are nested into this one."""
@staticmethod
def parse_from(stream: TextIO, expected_type: str = None, first_line: str = None) -> 'BdfRecord':
"""Parses a BDF record from the given stream.
If `expected_type` is present, throws an exception if the record is not of the given type.
If `first_line` is present, uses it instead of fetching a first line from the stream.
"""
first_line = first_line if first_line is not None else nextline(stream)
expected_start_tag = 'START' + expected_type if expected_type else ''
if not first_line.startswith(expected_start_tag):
raise SyntaxError(
f'Expected {expected_start_tag} but found {first_line}')
this_start_tag, *this_args = first_line.split()
record = BdfRecord(this_start_tag[len('START'):], this_args)
def add_record_item(key, item):
if key in record.items:
raise SyntaxError(f'Repeated {key} in {record.type}')
else:
record.items[key] = item
line = nextline(stream)
while line and not line.startswith('END'):
if line.startswith('START'):
record.children.append(
BdfRecord.parse_from(stream, first_line=line))
else:
if line.startswith('BITMAP'):
try:
bmp_width, bmp_height, *bmp_off = record.items['BBX']
except KeyError:
raise SyntaxError(
'Expected character BBX before BITMAP')
bitmap = BdfBitmap.parse_from(
stream, bmp_width, bmp_height, first_line=line)
add_record_item('BITMAP', bitmap)
else:
# TODO: If no space is present in `line` it is not a BITMAP line nor a key-value pair, so either:
# - The file is malformed
# - `BdfBitmap.parse_from()` did not read all lines in the bitmap
# Should likely throw an exception in both cases
# <KEY> <VALUE1> <VALUE2>...
key, values_str = line.split(maxsplit=1)
add_record_item(key, BdfProperty(values_str))
line = nextline(stream)
expected_end_tag = this_start_tag.replace('START', 'END')
if line != expected_end_tag:
got = repr(line) if line else 'EOF'
raise SyntaxError(f'Expected {expected_end_tag} but got {got}')
return record
def __repr__(self) -> str:
return f'BdfRecord({self.type})'
class BdfFont:
"""A font loaded from a "BDF"."""
def __init__(self, record: BdfRecord):
"""Inits the font given its FONT record."""
if record.type != 'FONT':
raise ValueError('Expected a FONT record in BDF')
if len(record.children) < 1 or record.children[0].type != 'PROPERTIES':
raise ValueError('Expected a PROPERTIES record in BDF')
properties = record.children[0]
def getval(record, key, default):
return record.items.get(key, (default,))[0]
self.bbox = BBox._make(record.items['FONTBOUNDINGBOX'])
"""The font's bounding box (W / H / OX / OY)."""
self.family = getval(properties, 'FAMILY_NAME', None)
"""Name of the font's family."""
self.weight = getval(properties, 'WEIGHT_NAME', None)
"""Name of the font's weight."""
self.logical_name = getval(record, 'FONT', None)
"""Logical (PostScript) name of the font."""
self.copyright = getval(properties, 'COPYRIGHT', None)
"""Copyright info on the font."""
# Table for faster lookups
self._chars = {child.items['ENCODING'][0]: child
for child in record.children if child.type == 'CHAR'}
def render_char(self, code: int) -> List[int]:
"""Renders the character with the given code to a list of bytes.
(`n` bytes per row, left-to-right, top-to-bottom).
Returns `None` if the character is missing from the font."""
char_record = self._chars.get(code, None)
if not char_record:
return None
char_bbox = char_record.items['BBX']
if char_bbox[0] != self.bbox.w or char_bbox[1] != self.bbox.h:
raise ValueError(
f'Character {code} has wrong BBX, font is not monospace!')
return char_record.items['BITMAP'].data
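# Minimal usage sketch (the path is a placeholder; assumes a monospace BDF font):
#
#   with open("font.bdf") as stream:
#       font = BdfFont(BdfRecord.parse_from(stream, expected_type="FONT"))
#       row_bytes = font.render_char(ord("A"))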
|
StarcoderdataPython
|
3244481
|
"""Utilities for CLIs."""
from argparse import ArgumentTypeError
from json import loads
def json_arg(value: str):
"""
Parse a JSON argument from the command line.
>>> json_arg('{"foo": "bar", "baz": [1, 2]}')
{'foo': 'bar', 'baz': [1, 2]}
>>> json_arg('{')
Traceback (most recent call last):
...
argparse.ArgumentTypeError: { is not in JSON format
"""
try:
return loads(value)
except ValueError:
raise ArgumentTypeError('%s is not in JSON format' % value)
|
StarcoderdataPython
|
3213842
|
<reponame>ffreemt/gpt3-api
"""Test gpt3_api."""
from gpt3_api import __version__
from gpt3_api import gpt3_api
def test_version():
"""Test version."""
assert __version__ == "0.1.0"
def test_sanity():
"""Sanity check."""
try:
assert not gpt3_api()
except Exception:
assert True
|
StarcoderdataPython
|
1802715
|
<reponame>malaiwah/ocpp
"""Constants for ocpp tests."""
from custom_components.ocpp.const import (
CONF_CPID,
CONF_CSID,
CONF_HOST,
CONF_METER_INTERVAL,
CONF_MONITORED_VARIABLES,
CONF_PORT,
)
from ocpp.v16.enums import Measurand
MOCK_CONFIG = {
CONF_HOST: "127.0.0.1",
CONF_PORT: 9000,
CONF_CPID: "test_cpid",
CONF_CSID: "test_csid",
CONF_METER_INTERVAL: 60,
}
MOCK_CONFIG_2 = {
Measurand.current_export.value: True,
Measurand.current_import.value: True,
Measurand.current_offered.value: True,
Measurand.energy_active_export_register.value: True,
Measurand.energy_active_import_register.value: True,
Measurand.energy_reactive_export_register.value: True,
Measurand.energy_reactive_import_register.value: True,
Measurand.energy_active_export_interval.value: True,
Measurand.energy_active_import_interval.value: True,
Measurand.energy_reactive_export_interval.value: True,
Measurand.energy_reactive_import_interval.value: True,
Measurand.frequency.value: True,
Measurand.power_active_export.value: True,
Measurand.power_active_import.value: True,
Measurand.power_factor.value: True,
Measurand.power_offered.value: True,
Measurand.power_reactive_export.value: True,
Measurand.power_reactive_import.value: True,
Measurand.rpm.value: True,
Measurand.soc.value: True,
Measurand.temperature.value: True,
Measurand.voltage.value: True,
}
MOCK_CONFIG_DATA = {
CONF_HOST: "127.0.0.1",
CONF_PORT: 9000,
CONF_CPID: "test_cpid",
CONF_CSID: "test_csid",
CONF_METER_INTERVAL: 60,
CONF_MONITORED_VARIABLES: "Current.Export,Current.Import,Current.Offered,Energy.Active.Export.Register,Energy.Active.Import.Register,Energy.Reactive.Export.Register,Energy.Reactive.Import.Register,Energy.Active.Export.Interval,Energy.Active.Import.Interval,Energy.Reactive.Export.Interval,Energy.Reactive.Import.Interval,Frequency,Power.Active.Export,Power.Active.Import,Power.Factor,Power.Offered,Power.Reactive.Export,Power.Reactive.Import,RPM,SoC,Temperature,Voltage",
}
# separate entry for switch so tests can run concurrently
MOCK_CONFIG_SWITCH = {
CONF_HOST: "127.0.0.1",
CONF_PORT: 9001,
CONF_CPID: "test_cpid_2",
CONF_CSID: "test_csid_2",
CONF_METER_INTERVAL: 60,
CONF_MONITORED_VARIABLES: "Current.Export,Current.Import,Current.Offered,Energy.Active.Export.Register,Energy.Active.Import.Register,Energy.Reactive.Export.Register,Energy.Reactive.Import.Register,Energy.Active.Export.Interval,Energy.Active.Import.Interval,Energy.Reactive.Export.Interval,Energy.Reactive.Import.Interval,Frequency,Power.Active.Export,Power.Active.Import,Power.Factor,Power.Offered,Power.Reactive.Export,Power.Reactive.Import,RPM,SoC,Temperature,Voltage",
}
DEFAULT_NAME = "test"
|
StarcoderdataPython
|
5178255
|
<reponame>remytuyeras/pedigrad-library<filename>Pedigrad_py/PartitionCategory/efp.py
#------------------------------------------------------------------------------
#_epi_factorize_partition(partition): list
#------------------------------------------------------------------------------
'''
This function relabels the elements of a list with non-negative integers. It starts with the integer 0 and allocates a new label by increasing the previously allocated label by 1. The first element of the list always receives the label 0 and the highest integer used in the relabeling equals the length of the 'image' of the list decreased by 1 (see iop.py).
e.g. _epi_factorize_partition(['A',4,'C','C','a','A']) = [0, 1, 2, 2, 3, 0]
'''
from iop import _image_of_partition
def _epi_factorize_partition(partition):
#The relabeling depends on the cardinal of the image of the partition.
#Computing the cardinal of the image is roughly the same as computing
#the image itself.
the_image = _image_of_partition(partition)
#A space is allocated to contain the relabeled list.
epimorphism=list()
#If the i-th element of the list is the j-th element of the image
#then this element is relabelled by the integer j.
for i in range(len(partition)):
for j in range(len(the_image)):
if partition[i] == the_image[j]:
epimorphism.append(j)
break
#Returns the relabeled list.
return epimorphism
|
StarcoderdataPython
|
1707422
|
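# NOTE (assumption): Hyper, Param, Input, Var, Output and the Runtime decorator are
# not defined or imported in this snippet; they appear to come from a TerpreT-style
# inference DSL that this model targets.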
const_T = Hyper()
const_M = Hyper()
@Runtime([const_M, const_M, const_M], const_M)
def Update(prev, cur, offset):
return (prev + cur + offset) % 2
offset = Param(const_M)
do_anything = Param(2)
initial_tape = Input(const_M)[2]
tape = Var(const_M)[const_T]
for t in range(2):
tape[t].set_to(initial_tape[t])
for t in range(2, const_T):
if do_anything == 1:
tape[t].set_to(Update(tape[t - 2], tape[t - 1], offset))
elif do_anything == 0:
tape[t].set_to(tape[t - 1])
final_tape = Output(const_M)
final_tape.set_to(tape[const_T - 1])
|
StarcoderdataPython
|
342236
|
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
sharded probability functions class for simulating symplectic acyclic_graphs.
"""
import jax
import jax.numpy as jnp
import math
import numpy as np
from asic_la.sharded_probability_function import utils
from asic_la.sharded_probability_function import complex_workaround as cw
import asic_la.sharded_probability_function.abstract_sharded_probability_function as aswf
@jax.tree_util.register_pytree_node_class
class ShardedProbabilityFunction(aswf.AbstractShardedProbabilityFunction):
"""
Sharded probability function that is optimized for 8x128 ASIC memory lanes.
Attributes:
concrete_tensor: A possibly distributed jax-array representing the
probability function of a discrete system.
perm: List of ints; the elements in `perm` map the axes of the
distributed concrete tensor to the corresponding positions if the tensor
was a numpy array, in the following referred to as the "virtual tensor".
For example: consider a distributed array
with a single global and three local discretes. `perm=[2,3,1,0]`
would mean that:
global (distributed) axis maps to axis 2 of the virtual tensor
axes 0 of `concrete_tensor` maps to axis 3 of the virtual tensor
axes 1 of `concrete_tensor` maps to axis 1 of the virtual tensor
axes 2 of `concrete_tensor` maps to axis 0 of the virtual tensor
Note that the elements in `perm` do *not* correspond to any discrete labels.
The main purpose of having `perm` is to establish a persistent
mapping from `concrete_tensor` to a virtual tensor.
The function `ShardedProbabilityFunction.dot` performs "numpy-style"
contractions of building_blocks with `ShardedProbabilityFunction`, i.e. emulates numpy
behaviour of the form
```
    def dot(self, matrix, axes):
        n = len(axes)
        return np.tensordot(self, matrix.reshape((2,) * (n * 2)), [axes, range(n)])
```
Internally, `dot` uses a ASIC friendly complicated mechanism to perform
this operation. During the operation, the `concrete_tensor` needs to be
permuted, globally swapped, and so on. `perm` gets updated during all
these operations such that at any time the distributed array can be
mapped to its virtual counterpart. After the `dot` operation, the
elements in `perm` again map global array axes and local array axes
(i.e. axes of `concrete_tensor`) to the position where they would
be if the `dot` function had been performed in numpy.
distributed: Whether the state is distributed or not.
num_global_discretes: Number of global discretes. Equal to log_2(num_devices).
num_discretes: Number of total discretes.
num_local_discretes: Number of discretes that are represented by the local shard.
"""
def __init__(self, concrete_tensor, perm, num_global_discretes=None):
"""Initialize ShardedProbabilityFunction.
Args:
concrete_tensor: The ShardedDeviceArray that stores the amplitudes.
perm: List of ints of the permutation from the "virtual tensor" to
concrete_tensor.
num_global_discretes: Number of global discretes. If `None`, then
`num_global_discretes = int(np.log2(jax.device_count()))`.
"""
super().__init__(
concrete_tensor=concrete_tensor,
perm=perm,
num_global_discretes=num_global_discretes,
)
def discrete_dot(self, matrix, axes, target_ndiscretes=7):
raise NotImplementedError(
"method `discrete_dot` is not implemented " "in ShardedProbabilityFunction"
)
def dot(self, matrix, axes, target_ndiscretes=7):
"""
Dot a matrix on the given virtual axes.
Equivalent code for the "virtual tensor":
```
        def dot(self, matrix, axes):
            n = len(axes)
            return np.tensordot(self, matrix.reshape((2,) * (n * 2)), [axes, range(n, 2*n)])
```
Args:
          matrix: A tuple of (2**n, 2**n) float32 matrices, where n is len(axes).
axes: Which virtual axes to dot with on this ShardedProbabilityFunction.
target_ndiscretes: An optional integer; `matrix` will be extended to a
(2**target_ndiscretes, 2**target_ndiscretes) shaped matrix to increase
performance of tensor contractions on ASIC.
Returns:
The new ShardedProbabilityFunction.
"""
new_tensor, perm = self._dot_helper(matrix, axes, target_ndiscretes)
new_perm = utils.remove_and_reduce(perm, axes)
new_perm = (
new_perm[: self.num_global_discretes]
+ tuple(range(self.num_discretes - len(axes), self.num_discretes))
+ new_perm[self.num_global_discretes :]
)
return ShardedProbabilityFunction(new_tensor, new_perm, self.num_global_discretes)
def transpose(self, perm):
"""Transpose the virtual tensor according to the given permutation.
        Note: This method only updates the perm attribute and calls no JAX code.
Args:
perm: The given permutation.
Returns:
The permutated ShardedProbabilityFunction.
"""
# Proof of how this works
# perm is a permutation on the axes of the virtual tensor,
# i.e. the array obtained by mapping the distributed array
# to e.g. a numpy array using `self.perm`.
# `self.perm` maps the axes of the `concrete_tensor`
# to their respective virtual positions.
# `tmp` is the permutation that brings `self.perm` into
# this virtual (linearly increasing) order, i.e.
# self.perm[tmp] = [0,1,2,3,...]
tmp = utils.invert_permutation(self.perm)
# once we are in virtual order we can apply `perm` to `tmp`.
tmp = utils.permute(tmp, perm)
# Finally, we invert transform the virtual permutation `tmp`
# back to `self.perm`.
new_perm = utils.invert_permutation(tmp)
# Set tmp to be the new perm and don't touch the concrete_tensor.
return self.__class__(
self.concrete_tensor, new_perm, self.num_global_discretes
)
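# Worked instance of the bookkeeping in transpose() above (helper semantics are
# assumptions here: utils.permute(x, p) == [x[i] for i in p] and invert_permutation
# is the usual permutation inverse):
#   perm = [2, 0, 1]                       # concrete axis k sits at virtual position perm[k]
#   invert_permutation(perm) -> [1, 2, 0]  # virtual position j is held by concrete axis (1, 2, 0)[j]
#   permuting that result and inverting again restores the concrete -> virtual
#   convention that self.perm stores.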
|
StarcoderdataPython
|
11264759
|
<reponame>ostrokach/proteinsolver<filename>proteinsolver/dashboard/ps_process.py
from __future__ import annotations
import logging
import multiprocessing as mp
from queue import Queue
from typing import Union
import torch
from proteinsolver.dashboard.msa_view import MSASeq
from proteinsolver.utils import array_to_seq, design_sequence
ctx = mp.get_context("spawn")
class ProteinSolverProcess(ctx.Process): # type: ignore
def __init__(self, net_class, state_file, data, num_designs, temperature=1.0, net_kwargs=None):
super().__init__(daemon=True)
self.net_class = net_class
self.state_file = state_file
self.net_kwargs = {} if net_kwargs is None else net_kwargs
self.output_queue: Queue[Union[Exception, MSASeq]] = ctx.Queue()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.data = data
self.num_designs = num_designs
self.temperature = temperature
self._cancel_event = ctx.Event()
def run(self) -> None:
logger = logging.getLogger(f"protein_solver_process.pid{self.pid}") # noqa
try:
net = self._init_network()
data = self.data.to(self.device)
except RuntimeError as e:
self.output_queue.put(e)
return
for i in range(self.num_designs):
if self.cancelled():
return
x, x_proba = design_sequence(
net,
data,
value_selection_strategy="multinomial",
num_categories=20,
temperature=self.temperature,
)
sum_proba = x_proba.mean().item()
sum_logproba = x_proba.log().mean().item()
seq = array_to_seq(x.data.numpy())
design = MSASeq(i, f"gen-{i + 1:05d}", seq, proba=sum_proba, logproba=sum_logproba)
self.output_queue.put(design)
del x, x_proba
def _init_network(self) -> torch.nn.Module:
net = self.net_class(**self.net_kwargs)
net = net.to(self.device)
net.load_state_dict(torch.load(self.state_file, map_location=self.device))
net = net.eval()
return net
def cancel(self) -> None:
self._cancel_event.set()
def cancelled(self) -> bool:
return self._cancel_event.is_set()
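# Rough usage sketch (net_class, state_file and data come from the surrounding
# application, so the names below are placeholders rather than real values):
#
#   proc = ProteinSolverProcess(net_class, state_file, data, num_designs=10)
#   proc.start()
#   design = proc.output_queue.get()  # yields MSASeq instances (or an exception)
#   proc.cancel()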
|
StarcoderdataPython
|
11253938
|
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils import choose
class ConvNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def get_optimizer(model):
"""This is where users choose their optimizer and define the
hyperparameter space they'd like to search."""
optimizer_class = optim.SGD
lr = choose(np.logspace(-5, 0, base=10))
momentum = choose(np.linspace(0.1, .9999))
return optimizer_class(model.parameters(), lr=lr, momentum=momentum)
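# `choose` (imported from utils) is assumed to sample one value from the array it is
# given, so each population member starts from a randomly drawn (lr, momentum) pair.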
DATA_DIR = "../../data/mnist/"
MODEL_CLASS = ConvNet
LOSS_FN = F.nll_loss
HYPERPARAM_NAMES = ["lr", "momentum"] # This is unfortunate.
EPOCHS = 10
BATCH_SIZE = 64
POPULATION_SIZE = 15 # Number of models in a population
EXPLOIT_INTERVAL = 0.5 # When to exploit, in number of epochs
USE_SQLITE = True # If False, you'll need to set up a local Postgres server
|
StarcoderdataPython
|
11281380
|
<reponame>najuzilu/CDW-AWSRedshift<filename>sql_queries.py
import configparser
# CONFIG
config = configparser.ConfigParser()
config.read("dwh.cfg")
SCHEMA_NAME = config.get("AWS", "SCHEMA_NAME")
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs"
songplay_table_drop = "DROP TABLE IF EXISTS songplays cascade;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
staging_events_table_create = """CREATE TABLE IF NOT EXISTS staging_events \
(artist varchar, auth varchar, firstName varchar, gender char, itemInSession int,\
lastName varchar, length float, level varchar, location varchar, method varchar, \
page varchar,registration timestamp, sessionId int, song varchar, status int, \
ts timestamp, userAgent varchar, userId int);"""
staging_songs_table_create = """CREATE TABLE IF NOT EXISTS staging_songs \
(num_songs int, artist_id varchar, artist_latitude float, \
artist_longitude float, artist_location varchar, artist_name varchar,\
song_id varchar, title varchar, duration float, year int);"""
songplay_table_create = """CREATE TABLE IF NOT EXISTS songplays \
(songplay_id int IDENTITY(0,1) PRIMARY KEY distkey, \
start_time timestamp NOT NULL, user_id int NOT NULL sortkey, \
level varchar, song_id varchar NOT NULL, artist_id varchar NOT NULL, \
session_id int NOT NULL, location varchar, user_agent varchar);"""
user_table_create = """CREATE TABLE IF NOT EXISTS users \
(user_id int PRIMARY KEY sortkey, first_name varchar, last_name varchar, \
gender char, level varchar);"""
song_table_create = """CREATE TABLE IF NOT EXISTS songs \
(song_id varchar PRIMARY KEY sortkey, title varchar NOT NULL, artist_id varchar,\
year int, duration float) diststyle all;"""
artist_table_create = """CREATE TABLE IF NOT EXISTS artists \
(artist_id varchar PRIMARY KEY sortkey, name varchar NOT NULL, location varchar, \
latitude float, longitude float) diststyle all;"""
time_table_create = """CREATE TABLE IF NOT EXISTS time \
(start_time timestamp PRIMARY KEY sortkey, hour int, day int, week int, \
month int, year int, weekday int);"""
# STAGING TABLES
staging_events_copy = """
copy {}.staging_events from '{}'
iam_role '{}'
json '{}'
timeformat 'epochmillisecs'
region 'us-west-2';
"""
staging_songs_copy = """
copy {}.staging_songs from '{}'
iam_role '{}'
json 'auto'
region 'us-west-2';
"""
# FINAL TABLES
songplay_table_insert = f"""INSERT INTO {SCHEMA_NAME}.songplays \
(start_time, user_id, level, song_id, artist_id, session_id, \
location, user_agent) SELECT \
staging_events.ts as start_time, \
staging_events.userId as user_id, \
staging_events.level as level, \
staging_songs.song_id as song_id, \
staging_songs.artist_id as artist_id, \
staging_events.sessionId as session_id, \
staging_events.location as location, \
staging_events.userAgent as user_agent \
FROM {SCHEMA_NAME}.staging_events \
JOIN {SCHEMA_NAME}.staging_songs \
ON (staging_events.artist=staging_songs.artist_name AND \
staging_events.song=staging_songs.title)
WHERE staging_events.page='NextSong';"""
user_table_insert = f"""INSERT INTO {SCHEMA_NAME}.users \
(user_id, first_name, last_name, gender, level) SELECT \
DISTINCT(staging_events.userId) as user_id, \
staging_events.firstName as first_name, \
staging_events.lastName as last_name, \
staging_events.gender as gender, \
staging_events.level as level \
FROM {SCHEMA_NAME}.staging_events
WHERE user_id IS NOT NULL AND staging_events.page='NextSong';"""
song_table_insert = f"""INSERT INTO {SCHEMA_NAME}.songs \
(song_id, title, artist_id, year, duration) SELECT \
DISTINCT(staging_songs.song_id) as song_id, \
staging_songs.title as title, \
staging_songs.artist_id as artist_id, \
staging_songs.year as year, \
staging_songs.duration as duration \
FROM {SCHEMA_NAME}.staging_songs;"""
artist_table_insert = f"""INSERT INTO {SCHEMA_NAME}.artists \
(artist_id, name, location, latitude, longitude) SELECT \
DISTINCT(staging_songs.artist_id) as artist_id, \
staging_songs.artist_name as name, \
staging_songs.artist_location as location, \
staging_songs.artist_latitude as latitude, \
staging_songs.artist_longitude as longitude \
FROM {SCHEMA_NAME}.staging_songs;"""
time_table_insert = f"""INSERT INTO {SCHEMA_NAME}.time \
(start_time, hour, day, week, month, year, weekday) SELECT \
DISTINCT(songplays.start_time) as start_time, \
EXTRACT(hour from songplays.start_time) as hour, \
EXTRACT(day from songplays.start_time) as day, \
EXTRACT(week from songplays.start_time) as week, \
EXTRACT(month from songplays.start_time) as month, \
EXTRACT(year from songplays.start_time) as year, \
EXTRACT(dow from songplays.start_time) as weekday \
FROM {SCHEMA_NAME}.songplays;"""
# QUERY LISTS
create_table_queries = [
staging_events_table_create,
staging_songs_table_create,
songplay_table_create,
user_table_create,
artist_table_create,
song_table_create,
time_table_create,
]
drop_table_queries = [
songplay_table_drop,
staging_events_table_drop,
staging_songs_table_drop,
user_table_drop,
song_table_drop,
artist_table_drop,
time_table_drop,
]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [
songplay_table_insert,
user_table_insert,
song_table_insert,
artist_table_insert,
time_table_insert,
]
|
StarcoderdataPython
|
8033411
|
from spt3g.core.load_pybindings import load_pybindings
load_pybindings(__name__, __path__)
from .ARCExtractor import UnpackACUData, UnpackTrackerData, DecryptFeatureBit, ARCExtract, ARCExtractMinimal
from .ARCHKExtractor import UnpackSPTpolHKData
from .GCPDataTee import GCPHousekeepingTee, GCPSignalledHousekeeping, GCPBoloDataTee, PagerWatchdog, DAQWatchdog
from .InfluxDB import UpdateDB
|
StarcoderdataPython
|
5143285
|
"""Computational models of the retina, such as phosphene and neural response models.
.. autosummary::
:toctree: _api
base
scoreboard
axon_map
watson2014
"""
from .base import BaseModel, NotBuiltError
from .watson2014 import (Watson2014ConversionMixin, dva2ret, ret2dva,
Watson2014DisplacementMixin)
from .scoreboard import ScoreboardModel
from .axon_map import AxonMapModel
__all__ = [
'AxonMapModel',
'BaseModel',
'dva2ret',
'NotBuiltError',
'ret2dva',
'ScoreboardModel',
'Watson2014ConversionMixin',
'Watson2014DisplacementMixin'
]
|
StarcoderdataPython
|
8007514
|
class Node:
def __init__(self):
self.parent = None
self.rank = 0
self.name = None
class DS:
def __init__(self):
self.top = Node()
self.top.parent = self.top
self.top.rank = 0
self.top.name = "rep"
self.array = []
def makeset(self, name):
x = Node()
x.parent = x
x.rank = 0
x.name = name
self.array.append(x)
return x
def findset(self, x: Node):
if x.parent is x:
return x
return self.findset(x.parent)
def union(self, x: Node, y: Node):
rx = self.findset(x)
ry = self.findset(y)
if rx.rank > ry.rank:
ry.parent = rx
elif ry.rank > rx.rank:
rx.parent = ry
else:
rx.parent = ry
ry.rank = ry.rank + 1
def printsets(self, x: Node):
temp = self.findset(x)
for i in self.array:
if self.findset(i) is temp:
print(i.name + " ", end=" ")
print()
class Edge:
def __init__(self, fro: int = -1, to: int = -1, weight: int = -1):
self.fro = fro
self.to = to
self.weight = weight
def main():
v = int(input("Enter the number of vertices"))
e = int(input("Enter the number of edges"))
arrayOfEdges = [Edge() for i in range(e)]
print("Enter edges in form-from:to:weight")
for i in range(e):
f, t, w = map(int, input().split())
arrayOfEdges[i] = Edge(f, t, w)
arrayOfEdges.sort(key=lambda c: c.weight)
x = DS()
for i in range(v):
x.makeset(i)
    # Kruskal's algorithm: scan the edges in increasing weight and keep an edge
    # whenever it joins two different components.
    for edge in arrayOfEdges:
        src = edge.fro
        dst = edge.to
        src_node = None
        dst_node = None
        for i in range(len(x.array)):
            if x.array[i].name == src:
                src_node = x.array[i]
            if x.array[i].name == dst:
                dst_node = x.array[i]
        p1 = x.findset(src_node)
        p2 = x.findset(dst_node)
        if p1 != p2:
            x.union(p1, p2)
            print("Edge from ", src, "to", dst)
if __name__ == '__main__':
main()
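# Illustrative session (hand-traced through the code above, not captured from a run):
#   4 vertices, 5 edges entered as "0 1 10", "0 2 6", "0 3 5", "1 3 15", "2 3 4"
#   prints the minimum spanning tree edges: 2-3, then 0-3, then 0-1.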
|
StarcoderdataPython
|
3393894
|
<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
from model.utils.layers import Shuffle3x3, Shuffle5x5
# Dual-shuffle Residual Block (DRB)
class DRB(nn.Module):
def __init__(self, n_features):
super(DRB, self).__init__()
self.features = n_features
self.groups = 4 #self.features / 3
self.shuffle3x3 = Shuffle3x3(in_channels = self.features, out_channels = self.features, groups = self.groups)
self.shuffle5x5 = Shuffle5x5(in_channels = self.features, out_channels = self.features, groups = self.groups)
self.outConv = nn.Conv2d(2*self.features, self.features, kernel_size = 3, stride = 1, padding = 1)
self.relu6 = nn.ReLU6()
def forward(self, x):
x_shuff3x3 = self.shuffle3x3(x)
x_shuff5x5 = self.shuffle5x5(x)
out = torch.cat((x_shuff3x3, x_shuff5x5), 1)
out = self.outConv(out)
return self.relu6(out)
|
StarcoderdataPython
|
5159288
|
"""
Contains configuration data for interacting with the source_data folder.
These are hardcoded here as this is meant to be a dumping
ground. Source_data is not actually user configurable.
For customized data loading, use mhwdata.io directly.
"""
from decimal import Decimal
supported_ranks = ('LR', 'HR')
"A mapping of all translations"
all_languages = {
'en': "English",
'ja': "日本語",
'fr': 'Français',
'it': 'Italiano',
'de': 'Deutsch',
'es': 'Español',
'pt': 'Português do Brasil',
'pl': 'Polski',
'ru': 'Pусский',
'ko': '한국어',
'zh': '繁體中文',
'ar': 'Arabe'
}
"A list of languages that require complete translations. Used in validation"
required_languages = ('en',)
"A list of languages that can be exported"
supported_languages = list(all_languages.keys())
"Languages that are designated as potentially incomplete"
incomplete_languages = []
"List of all possible armor parts"
armor_parts = ('head', 'chest', 'arms', 'waist', 'legs')
"Maximum number of items in a recipe"
max_recipe_item_count = 4
"Maximum number of skills in an armor piece/weapon"
max_skill_count = 2
GREAT_SWORD = 'great-sword'
LONG_SWORD = 'long-sword'
SWORD_AND_SHIELD = 'sword-and-shield'
DUAL_BLADES = 'dual-blades'
HAMMER = 'hammer'
HUNTING_HORN = 'hunting-horn'
LANCE = 'lance'
GUNLANCE = 'gunlance'
SWITCH_AXE = 'switch-axe'
CHARGE_BLADE = 'charge-blade'
INSECT_GLAIVE = 'insect-glaive'
LIGHT_BOWGUN = 'light-bowgun'
HEAVY_BOWGUN = 'heavy-bowgun'
BOW = 'bow'
"A list of all melee weapon types"
weapon_types_melee = (GREAT_SWORD, LONG_SWORD, SWORD_AND_SHIELD, DUAL_BLADES,
HAMMER, HUNTING_HORN, LANCE, GUNLANCE, SWITCH_AXE, CHARGE_BLADE,
INSECT_GLAIVE)
"A list of all bowgun weapon types"
weapon_types_gun = (LIGHT_BOWGUN, HEAVY_BOWGUN)
"A list of all ranged weapon types"
weapon_types_ranged = (*weapon_types_gun, BOW)
"A list of all weapon types"
weapon_types = (*weapon_types_melee, *weapon_types_ranged)
"Valid possible kinsect boosts"
valid_kinsects = ('sever', 'blunt', 'speed', 'element', 'health', 'stamina')
"Valid possible phial types (switchaxe and chargeblade)"
valid_phials = ('power', 'power element', 'dragon', 'poison', 'paralysis', 'exhaust', 'impact')
"Valid gunlance shelling types"
valid_shellings = ('normal', 'wide', 'long')
# notes are (white, purple, red, cyan, blue, green, orange, yellow)
"Valid notes for hunting horns"
valid_notes = ('W', 'P', 'R', 'C', 'B', 'G', 'O', 'Y')
icon_colors = [
"Gray", "White", "Lime", "Green", "Cyan", "Blue", "Violet", "Orange",
"Pink", "Red", "DarkRed", "LightBeige", "Beige", "DarkBeige", "Yellow",
"Gold", "DarkGreen", "DarkPurple"
]
"""A mapping of weapon type -> weapon multiplier to calculate true attack
These are decimal objects so that division can be exact"""
weapon_multiplier = {
GREAT_SWORD: Decimal("4.8"),
LONG_SWORD: Decimal("3.3"),
SWORD_AND_SHIELD: Decimal("1.4"),
DUAL_BLADES: Decimal("1.4"),
HAMMER: Decimal("5.2"),
HUNTING_HORN: Decimal("4.2"),
LANCE: Decimal("2.3"),
GUNLANCE: Decimal("2.3"),
SWITCH_AXE: Decimal("3.5"),
CHARGE_BLADE: Decimal("3.6"),
INSECT_GLAIVE: Decimal("3.1"),
LIGHT_BOWGUN: Decimal("1.3"),
HEAVY_BOWGUN: Decimal("1.5"),
BOW: Decimal("1.2")
}
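# Example of the intended use (a sketch): true raw = displayed attack / multiplier,
# e.g. a great sword displaying 960 attack gives Decimal("960") / Decimal("4.8") = 200 true raw.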
|
StarcoderdataPython
|
6488782
|
<filename>tools/xml_chip_filter.py
#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
import pdb
from collections import defaultdict
##------------------------------------------------------------
## xml_chip_nose_filter *.xml dirs
## given x, y, dist: return noses within dist of x, y /(av nose)
## default is all noses between eyes
##
## ex: filter -out file -dist 2345 -pt [x y] <xmls>
##
##------------------------------------------------------------
def main (argv) :
    parser = argparse.ArgumentParser(description='\nFilter chips with given circle (center & radius). Defaults to average nose and half the distance between the eyes. Ex: filter -pt "23 45" -dist 40 <files>',
formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=50))
# parser.formatter.max_help_position = 50
parser.add_argument ('files', nargs='+')
parser.add_argument ('-pt', '--pt', default="",
help='"x y" for center of circle. Defaults to average of noses.')
parser.add_argument ('-d', '--distance', default=0,
help='Radius of circle. Defaults to half distance between eyes.')
parser.add_argument ('-o', '--output', default="",
help='Output file basename. Defaults to "part_<date><time>_"')
parser.add_argument ('--verbosity', type=int, default=1,
choices=[0, 1, 2], help=argparse.SUPPRESS)
# help="increase output verbosity"
u.set_argv (argv)
args = parser.parse_args()
verbose = args.verbosity
center = [0, 0]
if args.pt :
pt = (args.pt).split (' ')
if len (pt) != 2 :
print("Input error for -pt: needs exactly 2 numbers. Using default.")
elif not pt[0].isdigit () or not pt[1].isdigit () :
print("Input error for -pt: needs to be 2 numbers. Using default.")
else :
center[0] = int (pt[0])
center[1] = int (pt[1])
# pdb.set_trace ()
if not args.output :
args.output = datetime.datetime.now().strftime("chip_filtered_%Y%m%d_%H%M.xml")
distance = 0
if args.distance :
if not (args.distance).isdigit () :
print("Input error for --distance: needs to be a number. Using default.")
else :
distance = int (args.distance)
if verbose > 0 :
print("output : ", args.output)
print("center : ", center)
print("distance: ", distance)
xml_files = u.generate_xml_file_list (args.files)
u.filter_chips (xml_files, center, distance, args.output)
if __name__ == "__main__":
main (sys.argv)
|
StarcoderdataPython
|
8170707
|
import numpy as np
import librosa, math
from hparams import hparams as hp
def load_wav(filename):
x = librosa.load(filename, sr=hp.sample_rate)[0]
return x
def save_wav(y, filename) :
librosa.output.write_wav(filename, y, hp.sample_rate)
|
StarcoderdataPython
|
4801220
|
<gh_stars>0
from pyspark.sql import SparkSession
from pyspark.sql.functions import when, col
def main():
spark = SparkSession.builder.master("local[2]").appName("change_seat").getOrCreate()
path = "data/seat.csv"
df_seat = spark.read.option("header", "true").csv(path)
df_seat.show()
df_seat.printSchema()
# The best solution is directly change the id
df1 = df_seat.withColumn("new_id", when((((col("id")) % 2) == 1) & (col("id") == df_seat.count()), col("id"))
.when(((col("id")) % 2) == 0, col("id") - 1)
.otherwise(col("id") + 1)
) \
.drop("id").withColumnRenamed("new_id", "id")
df2 = df1.selectExpr("cast(id as int) id", "student").orderBy("id")
df2.show()
if __name__ == "__main__":
main()
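# Expected behaviour on a small seat.csv (id,student), as implied by the when/otherwise
# logic above: adjacent ids swap students and a trailing odd id keeps its own.
#   input : (1,A) (2,B) (3,C) (4,D) (5,E)
#   output: (1,B) (2,A) (3,D) (4,C) (5,E)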
|
StarcoderdataPython
|
3490722
|
# Generated by Django 2.2.24 on 2021-07-30 14:49
import logging
import core.validators
import django.contrib.postgres.fields.jsonb
from django.db import migrations
logger = logging.getLogger(__name__)
schema = {'items': [{'properties': {'bbox': {'items': [{'contains': {'type': 'number'}, 'maxItems': 2, 'minItems': 2, 'type': 'array'}], 'maxItems': 2, 'minItems': 2, 'type': 'array'}, 'c': {'maxLength': 1, 'minLength': 1, 'type': 'string'}, 'confidence': {'maximum': 1, 'minimum': 0, 'type': 'number'}}, 'type': 'object'}], 'type': 'array'}
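# A single entry valid under this schema looks like
#   {"c": "a", "bbox": [[102, 40], [118, 63]], "confidence": 0.98}
# and forward() below rebuilds such dicts from the old (char, 4-point box, confidence) tuples.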
def batch_qs(qs, batch_size=1000):
total = qs.count()
for start in range(0, total, batch_size):
for e in qs[start:start+batch_size]:
yield e
def dummy(apps, se):
pass
def forward(apps, se):
LineTranscription = apps.get_model('core', 'LineTranscription')
for line_transcription in batch_qs(LineTranscription.objects.exclude(graphs__isnull=True)):
try:
line_transcription.graphs = [{
"c": x[0],
"bbox": [[x[1][0][0], x[1][0][1]], [x[1][2][0], x[1][2][1]]],
"confidence": float(x[2])
} for x in line_transcription.graphs]
except Exception as e:
logger.exception(e)
else:
line_transcription.save()
def backward(apps, se):
LineTranscription = apps.get_model('core', 'LineTranscription')
for line_transcription in batch_qs(LineTranscription.objects.exclude(graphs__isnull=True)):
try:
line_transcription.graphs = [
(x["c"],
[[x["bbox"][0][0],
x["bbox"][0][1]],
[],
[x["bbox"][1][0],
x["bbox"][1][1]],
[]],
x['confidence'])
for x in line_transcription.graphs]
except Exception as e:
logger.exception(e)
else:
line_transcription.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0056_auto_20211008_1250'),
]
operations = [
migrations.RunPython(forward, dummy),
migrations.AlterField(
model_name='linetranscription',
name='graphs',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[core.validators.JSONSchemaValidator(limit_value={'items': [{'properties': {'poly': {'items': [{'contains': {'type': 'number'}, 'maxItems': 2, 'minItems': 2, 'type': 'array'}], 'maxItems': 2, 'minItems': 2, 'type': 'array'}, 'c': {'maxLength': 1, 'minLength': 1, 'type': 'string'}, 'confidence': {'maximum': 1, 'minimum': 0, 'type': 'number'}}, 'type': 'object'}], 'type': 'array'})]),
),
migrations.RunPython(dummy, backward)
]
|
StarcoderdataPython
|
3353174
|
<reponame>danielbrenners/buzz-lightyear<filename>pi/emo_reco/helpers/nn/mxconv/__init__.py<gh_stars>0
# import the necessary packages
from .mxalexnet import MxAlexNet
from .mxvggnet import MxVGGNet
from .mxgooglenet import MxGoogLeNet
from .mxresnet import MxResNet
from .mxsqueezenet import MxSqueezeNet
|
StarcoderdataPython
|
9736425
|
<filename>react-flask-app/api/trip.py<gh_stars>0
import time
class Trip:
    # All per-trip state is initialized in __init__ below; this avoids shared
    # class-level defaults (in particular a mutable class-level snaps list).
def __init__(self):
self.startTime = time.strftime('%A %B, %d %Y %H:%M:%S')
self.nSnaps = 0
self.tripDuration = 0
self.avSpeed = 0
self.avRPM = 0
self.avEngineLoad = 0
self.avCoolantTemp = 0
self.avThrottlePos = 0
self.snaps = []
def update(self, data):
self.updateSpeed(data['speed'])
self.updateRPM(data['rpm'])
self.updateThrottlePos(data['throttlePos'])
self.updateEngineLoad(data['engineLoad'])
self.updateCoolantTemp(data['coolantTemp'])
self.updateDuration(data['startTime'])
self.addSnap(data)
self.incrementSnaps()
def getData(self):
return{
'startTime': self.startTime,
'duration': self.tripDuration,
'avSpeed': self.avSpeed,
'avRPM': self.avRPM,
'avEngineLoad': self.avEngineLoad,
'avCoolantTemp': self.avCoolantTemp,
'avThrottlePos': self.avThrottlePos,
'nSnaps': self.nSnaps,
'snaps': self.snaps
}
def updateDuration(self, startTime):
self.tripDuration = round(self.tripDuration + (time.time() - startTime),1)
def addSnap(self, chunk):
self.snaps.append(chunk)
def updateSpeed(self, speed):
self.avSpeed = round(
((self.avSpeed * self.nSnaps) + speed) / (self.nSnaps+1), 2)
def updateRPM(self, rpm):
self.avRPM = round(
(self.avRPM * self.nSnaps + rpm) / (self.nSnaps+1), 0)
def updateEngineLoad(self, engineLoad):
self.avEngineLoad = round((self.avEngineLoad * self.nSnaps +
engineLoad) / (self.nSnaps+1), 2)
def updateCoolantTemp(self, coolantTemp):
self.avCoolantTemp = round((self.avCoolantTemp * self.nSnaps +
coolantTemp) / (self.nSnaps+1), 2)
def updateThrottlePos(self, throttlePos):
self.avThrottlePos = round((self.avThrottlePos * self.nSnaps +
throttlePos) / (self.nSnaps+1), 2)
def incrementSnaps(self):
self.nSnaps = self.nSnaps+1
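# Minimal usage sketch (key names taken from update() above; the values are made up):
#
#   trip = Trip()
#   trip.update({'speed': 42.0, 'rpm': 1800, 'throttlePos': 12.5,
#                'engineLoad': 30.0, 'coolantTemp': 88.0, 'startTime': time.time()})
#   summary = trip.getData()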
|
StarcoderdataPython
|
6628613
|
# Native Modules
import requests # For making HTTP requests
# External Modules
from bs4 import BeautifulSoup # BeautifulSoup is a webscraping module
# Classes
import cmd_args # Global commandline arguments
from link import Link # 'Link' class
from http_request_wrapper import HTTPRequestWrapper # Wrapper for HTTP errors (fatal and non)
## Webcrawler class ##
class Webcrawler:
def __init__(self):
self.websites = list()
# Parse a website for HTML
@HTTPRequestWrapper.nonfatal_errors
def scrape(self, url, timeout):
# Return the raw HTML of the website as text
        response = requests.get(url, timeout=timeout, allow_redirects=True)
return response.text
# Reconstruct the URL of the website from the relative link and the URL
def reconstruct_url(self, url, website):
if not url.startswith('#') and not url.startswith("javascript") and not url.startswith("mailto"): # Filter out internal urls, javascript references, and mail servers
if url.startswith('/'): # Relative URLs
return website.url + url
else:
return url # Standard URL
# Find all links on a website
def find_links(self, raw_html, website):
soup = BeautifulSoup(raw_html, "html.parser")
# Get all the <a> (hyperlink) tags
a_tags = soup.find_all('a', href=True) # Parse the raw HTML for <a> tags with an 'href' (URL) element
print(f"Total of {len(a_tags)} <a> tags found\n") # Total amount of <a> tags (but not necessarily valid URLs) found
# Sort through all the URLs
for a_tag in a_tags:
if url := self.reconstruct_url(a_tag['href'], website): # Reconstruct the URL; if not an internal link, JS ref, or mail server, keep it
website.links.append( Link(a_tag.text, url) ) # Append a new 'Link' object (linked text, url, and status code)
@HTTPRequestWrapper.nonfatal_errors
def validate_links(self, links):
total = len(links)
        for count, link in enumerate(links, start=1):
print(f"Progress: {count} / {total} valid links...{'Done!' if count == total else ''}", end='\r') # A simple progress bar
response = requests.head(link.url, timeout=cmd_args.TIMEOUT, allow_redirects=True) # Send simple HEAD request
link.status_code = response.status_code
# Parse and find links of a website
def scrape_for_links(self, website):
self.websites.append(website) # Add the website to the list of websites
raw_html = self.scrape(website.url, cmd_args.TIMEOUT) # Scrape the website for the HTML
self.find_links(raw_html, website) # Parse the HTML for <a> (link) tags
self.validate_links(website.links) # Validate all the links found
# Print all links
def print_links(self):
for website in self.websites:
website.links.sort( key = lambda link: link.status_code ) # Sort the links (by status code)
print(f"A total of {len(website.links)} links found on {website.url}")
print(f"============================{'=' * len(website.url)}")
website.print_links() # Print all the links
# End of Webcrawler Class
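# Rough usage sketch (the Website object and cmd_args are provided by sibling modules
# not shown here, so treat the attribute names below as assumptions):
#
#   crawler = Webcrawler()
#   crawler.scrape_for_links(website)  # expects website.url and website.links
#   crawler.print_links()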
|
StarcoderdataPython
|
1604756
|
<reponame>dlshriver/dnnf<gh_stars>0
from __future__ import annotations
import itertools
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from dnnv.nn import OperationGraph, OperationTransformer
from dnnv.nn.utils import TensorDetails
from dnnv.properties import (
Add,
And,
Call,
Constant,
Exists,
Expression,
Forall,
LessThan,
LessThanOrEqual,
Multiply,
Network,
Or,
Subscript,
Symbol,
)
from dnnv.properties.visitors import ExpressionVisitor
class ReductionError(Exception):
pass
class Property(ABC):
@abstractmethod
def validate_counter_example(self, cex: np.ndarray) -> bool:
raise NotImplementedError()
class Reduction(ABC):
def __init__(self, *, reduction_error: Type[ReductionError] = ReductionError):
self.reduction_error = reduction_error
@abstractmethod
def reduce_property(self, phi: Expression) -> Iterator[Property]:
raise NotImplementedError()
class HPolyReductionError(ReductionError):
pass
class OpGraphMerger(OperationTransformer):
# TODO : merge common layers (e.g. same normalization, reshaping of input)
def __init__(self):
super().__init__()
self.output_operations = []
self.input_operations = {}
def merge(self, operation_graphs: Sequence[OperationGraph]):
for op_graph in operation_graphs:
for op in op_graph.output_operations:
self.output_operations.append(self.visit(op))
return OperationGraph(self.output_operations)
def visit_Input(self, operation):
input_details = (operation.dtype, tuple(operation.shape))
if input_details not in self.input_operations:
self.input_operations[input_details] = self.generic_visit(operation)
return self.input_operations[input_details]
class HPolyProperty(Property):
@classmethod
def build(
cls,
expr_details: ExpressionDetailsInference,
input_vars: Sequence[Expression],
output_vars: Sequence[Expression],
hpoly: Sequence[np.ndarray],
lb: np.ndarray,
ub: np.ndarray,
):
hpoly = list(hpoly)
variables = tuple(output_vars) + tuple(input_vars)
var_i_map = {v: i for i, v in enumerate(variables)}
var_offsets = [0]
for v in variables:
var_offsets.append(var_offsets[-1] + np.product(expr_details.shapes[v]))
for v in output_vars:
i = var_i_map[v]
offset = var_offsets[i]
shape = expr_details.shapes[v]
for idx in np.ndindex(*shape):
flat_idx = offset + np.ravel_multi_index(idx, shape)
if not np.isneginf(lb[flat_idx]):
hs = np.zeros((1, lb.shape[0] + 1))
hs[0, flat_idx] = -1
hs[0, -1] = -lb[flat_idx]
hpoly.append(hs)
if not np.isposinf(ub[flat_idx]):
hs = np.zeros((1, ub.shape[0] + 1))
hs[0, flat_idx] = 1
hs[0, -1] = ub[flat_idx]
hpoly.append(hs)
input_lower_bounds = []
input_upper_bounds = []
for v in input_vars:
i = var_i_map[v]
offset = var_offsets[i]
next_offset = var_offsets[i + 1]
shape = expr_details.shapes[v]
lower_bound = lb[offset : next_offset + 1].reshape(shape)
upper_bound = ub[offset : next_offset + 1].reshape(shape)
input_lower_bounds.append(lower_bound)
input_upper_bounds.append(upper_bound)
op_graphs = [n.value for n in sum((list(v.networks) for v in output_vars), [])]
merger = OpGraphMerger()
op_graph = merger.merge(op_graphs)
input_ops = tuple(merger.input_operations.values())
input_output_info = {
"input_names": [str(expr) for expr in input_vars],
"input_details": [expr_details[expr] for expr in input_vars],
"output_names": [str(expr) for expr in output_vars],
"output_details": [expr_details[expr] for expr in output_vars],
}
return cls(
op_graph,
hpoly,
input_lower_bounds,
input_upper_bounds,
input_ops,
input_output_info,
)
def __init__(
self,
op_graph: OperationGraph,
hpoly: Sequence[np.ndarray],
input_lower_bounds: Sequence[np.ndarray],
input_upper_bounds: Sequence[np.ndarray],
input_ops: Sequence[np.ndarray],
input_output_info: Dict[str, Any],
):
self.op_graph = op_graph
self.hpoly = hpoly
self.input_lower_bounds = input_lower_bounds
self.input_upper_bounds = input_upper_bounds
self.input_ops = input_ops
self.input_output_info = input_output_info
def __repr__(self):
strs = []
for i, (x, (shape, _)) in enumerate(
zip(
self.input_output_info["input_names"],
self.input_output_info["input_details"],
)
):
for idx in np.ndindex(*shape):
lb = self.input_lower_bounds[i][idx]
ub = self.input_upper_bounds[i][idx]
strs.append(f"{lb} <= {x}[{idx}] <= {ub}")
for hs in self.hpoly:
hs_str = []
offset = 0
for v, (shape, _) in itertools.chain(
zip(
self.input_output_info["output_names"],
self.input_output_info["output_details"],
),
zip(
self.input_output_info["input_names"],
self.input_output_info["input_details"],
),
):
for idx in np.ndindex(shape):
flat_idx = np.ravel_multi_index(idx, shape) + offset
c = hs[0, flat_idx]
if abs(c) <= 1e-100:
continue
hs_str.append(f"{c}*{v}[{idx}]")
offset = flat_idx + 1
b = hs[0, -1]
strs.append(" + ".join(hs_str) + f" <= {b}")
return "\n".join(strs)
def validate_counter_example(self, cex: np.ndarray) -> bool:
if np.any(np.isnan(cex)):
return False
        if np.any(self.input_lower_bounds[0] > cex) or np.any(
            self.input_upper_bounds[0] < cex
        ):
return False
y = self.op_graph(cex)
if isinstance(y, tuple):
flat_y = np.hstack([y_.flatten() for y_ in y])
else:
flat_y = y.flatten()
flat_output = np.hstack([flat_y, cex.flatten()])
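        # Each row of self.hpoly encodes one halfspace: coefficients in hs[0, :-1] and
        # bound in hs[0, -1]; the candidate [y; x] is accepted only if
        # hs[0, :-1] @ flat_output <= hs[0, -1] holds for every row.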
for hs in self.hpoly:
hy = hs[0, :-1] @ flat_output
b = hs[0, -1]
if np.any(hy > b):
return False
return True
def suffixed_op_graph(self) -> OperationGraph:
import dnnv.nn.operations as operations
output_shape = self.op_graph.output_shape[0]
axis = (0, 0, 1)[len(output_shape)]
if len(self.op_graph.output_operations) == 1:
new_output_op = self.op_graph.output_operations[0]
else:
if axis == 0:
output_operations = [
operations.Reshape(o, (-1,))
for o in self.op_graph.output_operations
]
else:
output_operations = [
operations.Flatten(o, axis=axis)
for o in self.op_graph.output_operations
]
new_output_op = operations.Concat(output_operations, axis=axis)
if axis == 0:
flat_input_ops = [operations.Reshape(o, (-1,)) for o in self.input_ops]
else:
flat_input_ops = [operations.Flatten(o, axis=axis) for o in self.input_ops]
new_output_op = operations.Concat([new_output_op] + flat_input_ops, axis=axis)
dtype = OperationGraph([new_output_op]).output_details[0].dtype
Wb = np.vstack(self.hpoly)
W = Wb[:, :-1].T.astype(dtype)
b = -Wb[:, -1].astype(dtype)
new_output_op = operations.Add(operations.MatMul(new_output_op, W), b)
new_output_op = operations.Relu(new_output_op)
k = len(self.hpoly)
W_mask = np.zeros((k, 2), dtype=dtype)
b_mask = np.zeros(2, dtype=dtype)
for i in range(k):
W_mask[i, 0] = 1
new_output_op = operations.Add(operations.MatMul(new_output_op, W_mask), b_mask)
new_op_graph = (
OpGraphMerger().merge([OperationGraph([new_output_op])]).simplify()
)
return new_op_graph
class ExpressionDetailsInference(ExpressionVisitor):
def __init__(self, reduction_error: Type[ReductionError] = ReductionError):
super().__init__()
self.reduction_error = reduction_error
# TODO : make types and shapes symbolic so we don't need to order expressions
self.shapes: Dict[Expression, Tuple[int, ...]] = {}
self.types: Dict[Expression, Union[Type, np.dtype]] = {}
def __getitem__(self, expression: Expression) -> TensorDetails:
return TensorDetails(self.shapes[expression], self.types[expression])
def visit_Add(self, expression: Add):
tmp_array: Optional[np.ndarray] = None
for expr in expression:
self.visit(expr)
shape = self.shapes[expr]
dtype = self.types[expr]
if tmp_array is None:
tmp_array = np.empty(shape, dtype=dtype)
else:
tmp_array = tmp_array + np.empty(shape, dtype=dtype)
if tmp_array is not None:
self.shapes[expression] = tuple(tmp_array.shape)
self.types[expression] = tmp_array.dtype
def visit_And(self, expression: And):
for expr in sorted(expression, key=lambda e: -len(e.networks)):
self.visit(expr)
def visit_Call(self, expression: Call):
if isinstance(expression.function, Network):
input_details = expression.function.value.input_details
if len(expression.args) != len(input_details):
raise self.reduction_error(
"Invalid property:"
f" Not enough inputs for network '{expression.function}'"
)
if len(expression.kwargs) > 0:
raise self.reduction_error(
"Unsupported property:"
" Executing networks with keyword arguments"
" is not currently supported"
)
for arg, d in zip(expression.args, input_details):
if arg in self.shapes:
arg_shape = self.shapes[arg]
assert arg_shape is not None
if any(
i1 != i2 and i2 > 0 for i1, i2 in zip(arg_shape, tuple(d.shape))
):
raise self.reduction_error(
f"Invalid property: variable with multiple shapes: '{arg}'"
)
self.shapes[arg] = tuple(i if i > 0 else 1 for i in d.shape)
self.types[arg] = d.dtype
self.visit(arg)
output_details = expression.function.value.output_details
if len(output_details) == 1:
self.shapes[expression] = output_details[0].shape
self.types[expression] = output_details[0].dtype
else:
raise RuntimeError(
"Multiple output operations are not currently supported"
" by this method."
" If you encounter this error, please open an issue on GitHub."
)
else:
raise self.reduction_error(
"Unsupported property:"
f" Function {expression.function} is not currently supported"
)
def visit_Constant(self, expression: Constant):
value = expression.value
if isinstance(value, np.ndarray):
self.shapes[expression] = value.shape
self.types[expression] = value.dtype
elif isinstance(value, (list, tuple)):
arr = np.asarray(value)
self.shapes[expression] = arr.shape
self.types[expression] = arr.dtype
else:
self.shapes[expression] = ()
self.types[expression] = type(value)
def visit_Multiply(self, expression: Multiply):
tmp_array: Optional[np.ndarray] = None
for expr in expression:
self.visit(expr)
shape = self.shapes[expr]
dtype = self.types[expr]
if tmp_array is None:
tmp_array = np.empty(shape, dtype=dtype)
else:
tmp_array = tmp_array * np.empty(shape, dtype=dtype)
if tmp_array is not None:
self.shapes[expression] = tuple(tmp_array.shape)
self.types[expression] = tmp_array.dtype
def visit_Or(self, expression: Or):
for expr in sorted(expression, key=lambda e: -len(e.networks)):
self.visit(expr)
def visit_Subscript(self, expression: Subscript):
self.visit(expression.expr)
if not expression.index.is_concrete:
return
index = expression.index.value
expr_shape = self.shapes[expression.expr]
for i, d in zip(index, expr_shape):
if not isinstance(i, slice) and i >= d:
raise self.reduction_error(f"Index out of bounds: {expression}")
self.shapes[expression] = tuple(np.empty(expr_shape)[index].shape)
self.types[expression] = self.types[expression.expr]
class HPolyPropertyBuilder:
def __init__(
self,
expr_details: ExpressionDetailsInference,
input_vars: List[Symbol],
output_vars: List[Expression],
):
self.expr_details = expr_details
self.input_vars = input_vars
self.output_vars = output_vars
self.variables = self.output_vars + self.input_vars
self.var_i_map = {v: i for i, v in enumerate(self.variables)}
self.var_offsets = [0]
for v in self.variables:
self.var_offsets.append(
self.var_offsets[-1] + np.product(self.expr_details.shapes[v])
)
num_input_vars = 0
for x in input_vars:
num_input_vars += np.product(self.expr_details.shapes[x])
num_output_vars = 0
for y in output_vars:
num_output_vars += np.product(self.expr_details.shapes[y])
self.num_input_vars = num_input_vars
self.num_output_vars = num_output_vars
self.num_vars = num_input_vars + num_output_vars
self.coefficients: Dict[
Expression, Union[np.ndarray, Sequence[np.ndarray]]
] = {}
self.var_indices: Dict[
Expression,
Union[
Tuple[Expression, np.ndarray], Sequence[Tuple[Expression, np.ndarray]]
],
] = {}
for v in self.variables:
shape = self.expr_details.shapes[v]
assert shape is not None
assert isinstance(shape, tuple)
var_ids = np.full(shape, self.var_i_map[v])
indices = np.array([i for i in np.ndindex(*shape)]).reshape(
shape + (len(shape),)
)
self.var_indices[v] = (var_ids, indices)
self.hpoly_constraints: List[np.ndarray] = []
self.interval_constraints: Tuple[np.ndarray, np.ndarray] = (
np.full(self.num_vars, -np.inf),
np.full(self.num_vars, np.inf),
)
def add_constraint(self, variables, indices, coeffs, b, is_open):
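        # Each constraint encodes sum_i(coeffs[i] * x[variables[i], indices[i]]) <= b.
        # Open (strict) inequalities are tightened to the nearest representable
        # value below b via np.nextafter. Constraints over a single variable are
        # folded into per-variable interval bounds instead of a halfspace row.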
if is_open:
b = np.nextafter(b, b - 1)
if len(variables) > 1:
hs = np.zeros((1, self.num_vars + 1))
for v, i, c in zip(variables, indices, coeffs):
flat_index = self.var_offsets[v] + np.ravel_multi_index(
                    i, self.expr_details.shapes[self.variables[v]]
)
hs[0, flat_index] = c
hs[0, self.num_vars] = b
self.hpoly_constraints.append(hs)
else:
flat_index = self.var_offsets[variables[0]] + np.ravel_multi_index(
indices[0], self.expr_details.shapes[self.variables[variables[0]]]
)
coeff = coeffs[0]
if coeff > 0:
current_bound = self.interval_constraints[1][flat_index]
self.interval_constraints[1][flat_index] = min(b / coeff, current_bound)
elif coeff < 0:
current_bound = self.interval_constraints[0][flat_index]
self.interval_constraints[0][flat_index] = max(b / coeff, current_bound)
def build(self) -> HPolyProperty:
return HPolyProperty.build(
self.expr_details,
self.input_vars,
self.output_vars,
self.hpoly_constraints,
*self.interval_constraints,
)
class HPolyReduction(Reduction):
def __init__(
self,
negate: bool = True,
*,
reduction_error: Type[ReductionError] = HPolyReductionError,
):
super().__init__(reduction_error=reduction_error)
self.logger = logging.getLogger(__name__)
self.negate = negate
self.expression_details = ExpressionDetailsInference(
reduction_error=reduction_error
)
self._property_builder: Optional[HPolyPropertyBuilder] = None
def reduce_property(self, phi: Expression) -> Iterator[HPolyProperty]:
if isinstance(phi, Exists):
raise NotImplementedError(
"HPolyReduction currently supports only"
" universally quantified specifications"
)
expr = phi
while isinstance(expr, Forall):
expr = expr.expression
if self.negate:
expr = ~expr
canonical_expr = expr.canonical()
assert isinstance(canonical_expr, Or)
self.expression_details.visit(canonical_expr)
for expr, shape in self.expression_details.shapes.items():
if shape is None:
raise self.reduction_error(
f"Unable to infer shape for expression: {expr}"
)
for disjunct in canonical_expr:
self.logger.debug("DISJUNCT: %s", disjunct)
input_variables = disjunct.variables
output_variables = list(
set(
expr
for expr in disjunct.iter()
if isinstance(expr, Call)
and isinstance(expr.function, Network)
and expr in self.expression_details.shapes
)
)
self._property_builder = HPolyPropertyBuilder(
self.expression_details, list(input_variables), output_variables
)
self.visit(disjunct)
prop = self._property_builder.build()
yield prop
self._property_builder = None
def visit(self, expression: Expression):
method_name = f"visit_{type(expression).__name__}"
visitor = getattr(self, method_name, self.generic_visit)
return visitor(expression)
def generic_visit(self, expression: Expression):
raise NotImplementedError(
f"No visitor for expression type: {expression.__class__.__name__}"
)
def visit_Add(self, expression: Add):
coeffs = []
var_indices = []
assert self._property_builder is not None
for expr in expression:
self.visit(expr)
coeff = self._property_builder.coefficients[expr]
assert isinstance(coeff, np.ndarray)
coeffs.append(coeff)
var_indices.append(self._property_builder.var_indices[expr])
self._property_builder.var_indices[expression] = tuple(zip(*var_indices))
self._property_builder.coefficients[expression] = coeffs
def visit_Multiply(self, expression: Multiply):
coeff = None
variable = None
        if len(expression.expressions) != 2:
raise self.reduction_error("Property is not in canonical form.")
for expr in expression:
self.visit(expr)
if expr.is_concrete:
coeff = expr
elif variable is None:
variable = expr
else:
raise self.reduction_error(
"Non-linear properties are not currently supported"
)
assert coeff is not None
assert variable is not None
assert self._property_builder is not None
coeff_shape = self.expression_details.shapes[coeff]
variable_shape = self.expression_details.shapes[variable]
coeff_value = np.full(coeff_shape, coeff.value)
if coeff_shape != variable_shape:
try:
broadcast_shape = np.broadcast(
np.empty(coeff_shape), np.empty(variable_shape)
).shape
assert broadcast_shape == variable_shape # TODO: extend this
coeff_value = np.broadcast_to(coeff_value, broadcast_shape)
except ValueError:
raise self.reduction_error(
"Mismatched shapes in Multiply expression:"
f" {coeff_shape} and {variable_shape}"
)
self._property_builder.var_indices[
expression
] = self._property_builder.var_indices[variable]
self._property_builder.coefficients[expression] = coeff_value
def visit_Subscript(self, expression: Subscript):
self.visit(expression.expr)
self.visit(expression.index)
if not expression.index.is_concrete:
raise self.reduction_error("Unsupported property: Symbolic subscript index")
assert self._property_builder is not None
var_ids, indices = self._property_builder.var_indices[expression.expr]
new_var_ids = var_ids[expression.index.value]
new_indices = indices[expression.index.value]
self._property_builder.var_indices[expression] = (new_var_ids, new_indices)
def visit_And(self, expression: And):
for expr in sorted(expression, key=lambda e: -len(e.networks)):
self.visit(expr)
def visit_Call(self, expression: Call):
if expression not in self.expression_details.shapes:
raise self.reduction_error(f"Unknown shape for expression: {expression}")
def visit_Constant(self, expression: Constant):
pass
def _add_constraint(self, expression: Union[LessThan, LessThanOrEqual]):
self.visit(expression.expr1)
self.visit(expression.expr2)
lhs = expression.expr1
rhs = expression.expr2
lhs_shape = self.expression_details.shapes[lhs]
rhs_shape = self.expression_details.shapes[rhs]
assert self._property_builder is not None
lhs_vars, lhs_indices = self._property_builder.var_indices[lhs]
lhs_coeffs = self._property_builder.coefficients[lhs]
assert len(lhs_coeffs) == len(lhs_vars)
assert len(lhs_vars) == len(lhs_indices)
assert all(v.shape == lhs_vars[0].shape for v in lhs_vars[1:])
assert all(i.shape == lhs_indices[0].shape for i in lhs_indices[1:])
rhs_value = np.full(rhs_shape, rhs.value)
if lhs_shape != rhs_shape:
try:
broadcast_shape = np.broadcast(
np.empty(lhs_shape), np.empty(rhs_shape)
).shape
assert broadcast_shape == lhs_shape # TODO: extend this
rhs_value = np.broadcast_to(rhs_value, broadcast_shape)
except ValueError:
raise self.reduction_error(
f"Mismatched shapes in {type(expression).__name__} expression:"
f" {lhs_shape} and {rhs_shape}"
)
for idx in np.ndindex(lhs_vars[0].shape):
variables = tuple(v[idx] for v in lhs_vars)
indices = tuple(i[idx] for i in lhs_indices)
coeffs = tuple(c[idx] for c in lhs_coeffs)
self._property_builder.add_constraint(
variables,
indices,
coeffs,
rhs_value[idx],
is_open=isinstance(expression, LessThan),
)
def visit_LessThanOrEqual(self, expression: LessThanOrEqual):
self._add_constraint(expression)
def visit_LessThan(self, expression: LessThan):
self._add_constraint(expression)
def visit_Symbol(self, expression: Symbol):
pass
__all__ = [
"HPolyProperty",
"HPolyReduction",
"HPolyReductionError",
"Property",
"Reduction",
"ReductionError",
]
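# Minimal usage sketch (illustrative only): given a universally quantified
# property `phi` built from the expression classes used above (Forall, Symbol,
# Network calls, comparisons), the reduction yields one HPolyProperty per
# disjunct of the negated, canonicalized specification. How `phi` is
# constructed is outside this module and assumed here.
#
#     reduction = HPolyReduction(negate=True)
#     for prop in reduction.reduce_property(phi):
#         ...  # hand each HPolyProperty to a verifier backend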
|
StarcoderdataPython
|
4913816
|
#####################################################################
# #
# /labscript_devices/PrawnBlaster/runviewer_parsers.py #
# #
# Copyright 2021, <NAME> #
# #
# This file is part of labscript_devices, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import labscript_utils.h5_lock # noqa: F401
import h5py
import numpy as np
import labscript_utils.properties as properties
class PrawnBlasterParser(object):
"""Runviewer parser for the PrawnBlaster Pseudoclocks."""
def __init__(self, path, device):
"""
Args:
path (str): path to h5 shot file
device (str): labscript name of PrawnBlaster device
"""
self.path = path
self.name = device.name
self.device = device
def get_traces(self, add_trace, clock=None):
"""Reads the shot file and extracts hardware instructions to produce
runviewer traces.
Args:
add_trace (func): function handle that adds traces to runviewer
clock (tuple, optional): clock times from timing device, if not
the primary pseudoclock
Returns:
dict: Dictionary of clocklines and triggers derived from instructions
"""
if clock is not None:
times, clock_value = clock[0], clock[1]
clock_indices = np.where((clock_value[1:] - clock_value[:-1]) == 1)[0] + 1
# If initial clock value is 1, then this counts as a rising edge
# (clock should be 0 before experiment) but this is not picked up
# by the above code. So we insert it!
if clock_value[0] == 1:
clock_indices = np.insert(clock_indices, 0, 0)
clock_ticks = times[clock_indices]
# get the pulse program
pulse_programs = []
with h5py.File(self.path, "r") as f:
# Get the device properties
device_props = properties.get(f, self.name, "device_properties")
conn_props = properties.get(f, self.name, "connection_table_properties")
self.clock_resolution = device_props["clock_resolution"]
self.trigger_delay = device_props["trigger_delay"]
self.wait_delay = device_props["wait_delay"]
# Extract the pulse programs
num_pseudoclocks = conn_props["num_pseudoclocks"]
for i in range(num_pseudoclocks):
pulse_programs.append(f[f"devices/{self.name}/PULSE_PROGRAM_{i}"][:])
# Generate clocklines and triggers
clocklines_and_triggers = {}
for pulse_program in pulse_programs:
time = []
states = []
trigger_index = 0
t = 0 if clock is None else clock_ticks[trigger_index] + self.trigger_delay
trigger_index += 1
clock_factor = self.clock_resolution / 2.0
last_instruction_was_wait = False
for row in pulse_program:
if row["reps"] == 0 and not last_instruction_was_wait: # WAIT
last_instruction_was_wait = True
if clock is not None:
t = clock_ticks[trigger_index] + self.trigger_delay
trigger_index += 1
else:
t += self.wait_delay
elif last_instruction_was_wait:
                    # Two waits in a row mean an indefinite wait, so we just
                    # skip this instruction.
last_instruction_was_wait = False
continue
else:
last_instruction_was_wait = False
for i in range(row["reps"]):
for j in range(1, -1, -1):
time.append(t)
states.append(j)
t += row["half_period"] * clock_factor
clock = (np.array(time), np.array(states))
for pseudoclock_name, pseudoclock in self.device.child_list.items():
for clock_line_name, clock_line in pseudoclock.child_list.items():
# Ignore the dummy internal wait monitor clockline
if clock_line.parent_port.startswith("GPIO"):
clocklines_and_triggers[clock_line_name] = clock
add_trace(
clock_line_name, clock, self.name, clock_line.parent_port
)
return clocklines_and_triggers
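# Usage sketch (illustrative; runviewer normally instantiates this parser
# itself): `path` is the h5 shot file and `device` is the connection-table
# entry for the PrawnBlaster, with `add_trace` supplied by runviewer.
#
#     parser = PrawnBlasterParser(path, device)
#     clocklines_and_triggers = parser.get_traces(add_trace)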
|
StarcoderdataPython
|
1691057
|
# Copyright 2021 Ringgaard Research ApS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SLING Twitter profile service"""
import json
import tweepy
class TwitterService:
def __init__(self):
# Read twitter credentials.
with open("local/keys/twitter.json", "r") as f:
apikeys = json.load(f)
# Connect to twitter.
auth = tweepy.OAuthHandler(apikeys["consumer_key"],
apikeys["consumer_secret"])
auth.set_access_token(apikeys["access_key"], apikeys["access_secret"])
self.api = tweepy.API(auth)
def handle(self, request):
params = request.params()
user = params["user"][0]
print("fetch twitter profile for", user)
return self.api.get_user(user)._json
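# Usage sketch (illustrative; assumes a request object exposing .params() with
# a "user" query parameter, as used above):
#
#     service = TwitterService()
#     profile = service.handle(request)  # raw tweepy user JSON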
|
StarcoderdataPython
|
9736475
|
<filename>openquake.hazardlib/openquake/hazardlib/gsim/douglas_stochastic_2013.py
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports
:class:`DouglasEtAl2013StochasticSD001Q200K005`
:class:`DouglasEtAl2013StochasticSD001Q200K020`
:class:`DouglasEtAl2013StochasticSD001Q200K040`
:class:`DouglasEtAl2013StochasticSD001Q200K060`
:class:`DouglasEtAl2013StochasticSD001Q600K005`
:class:`DouglasEtAl2013StochasticSD001Q600K020`
:class:`DouglasEtAl2013StochasticSD001Q600K040`
:class:`DouglasEtAl2013StochasticSD001Q600K060`
:class:`DouglasEtAl2013StochasticSD001Q1800K005`
:class:`DouglasEtAl2013StochasticSD001Q1800K020`
:class:`DouglasEtAl2013StochasticSD001Q1800K040`
:class:`DouglasEtAl2013StochasticSD001Q1800K060`
:class:`DouglasEtAl2013StochasticSD010Q200K005`
:class:`DouglasEtAl2013StochasticSD010Q200K020`
:class:`DouglasEtAl2013StochasticSD010Q200K040`
:class:`DouglasEtAl2013StochasticSD010Q200K060`
:class:`DouglasEtAl2013StochasticSD010Q600K005`
:class:`DouglasEtAl2013StochasticSD010Q600K020`
:class:`DouglasEtAl2013StochasticSD010Q600K040`
:class:`DouglasEtAl2013StochasticSD010Q600K060`
:class:`DouglasEtAl2013StochasticSD010Q1800K005`
:class:`DouglasEtAl2013StochasticSD010Q1800K020`
:class:`DouglasEtAl2013StochasticSD010Q1800K040`
:class:`DouglasEtAl2013StochasticSD010Q1800K060`
:class:`DouglasEtAl2013StochasticSD100Q200K005`
:class:`DouglasEtAl2013StochasticSD100Q200K020`
:class:`DouglasEtAl2013StochasticSD100Q200K040`
:class:`DouglasEtAl2013StochasticSD100Q200K060`
:class:`DouglasEtAl2013StochasticSD100Q600K005`
:class:`DouglasEtAl2013StochasticSD100Q600K020`
:class:`DouglasEtAl2013StochasticSD100Q600K040`
:class:`DouglasEtAl2013StochasticSD100Q600K060`
:class:`DouglasEtAl2013StochasticSD100Q1800K005`
:class:`DouglasEtAl2013StochasticSD100Q1800K020`
:class:`DouglasEtAl2013StochasticSD100Q1800K040`
:class:`DouglasEtAl2013StochasticSD100Q1800K060`
"""
from __future__ import division
import numpy as np
from scipy.constants import g
from math import sqrt
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
class DouglasEtAl2013StochasticSD001Q200K005(GMPE):
"""
Implements the GMPE for induced seismicity in Geothermal Areas derived
from stochastic simulations of ground motion constructed by:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., and <NAME>. (2013)
"Predicting Ground Motion for Induced Earthquakes in Geothermal Areas"
    Bulletin of the Seismological Society of America, 103(3), 1875 - 1897
The stochastic model by Douglas et al. (2013) provides coefficients for
36 GMPEs, corresponding to different values of Stress Drop (1 bar, 10 bar,
    100 bar), Attenuation Quality Factor Q (200, 600, 1800) and high-frequency
    Kappa (0.005, 0.02, 0.04, 0.06 s).
The present model is implemented for Stress Drop 1 bar, Q 200 and
Kappa 0.005 s.
The models for each combination of Stress Drop, Q and Kappa
are implemented in subclasses, with only the median coefficients modified
    in each subclass.
Notes on implementation:
1) Aleatory uncertainty terms are not supplied for the stochastic
coefficients. Instead the adjusted aleatory uncertainty coefficients
derived from empirical observations are applied to the stochastic
model.
2) In the initial coefficient set for the stochastic model coefficients
for spectral accelerations up to 10 s are provided. However, the
empirical aleatory uncertainties are provided only for periods up
           to 0.5012 s. Therefore, after consultation with J. Douglas, it was
           decided to limit the longest applicable spectral period to Sa (0.5 s),
           rather than extrapolate the empirical aleatory coefficients to
           longer periods.
3) For PGA and Sa (< 0.01 s) the aleatory uncertainty coefficients
for Sa (0.01 s) are applied (J. Douglas, pers. comm.)
4) For Sa (< 0.01 s) the coefficients are interpolated assuming
           PGA occurs at Sa (0.005 s) (J. Douglas, pers. comm.). We therefore
           limit the short period range to 0.005 s.
"""
#: The supported tectonic region type is Geothermal because
#: the equations have been developed for geothermal regions
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.GEOTHERMAL
#: The supported intensity measure types are PGA, PGV, and SA, see table
#: 4.a, pages 22-23
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
PGV,
SA
])
#: The supported intensity measure component is 'average horizontal', see
#: section entitiled "Empirical Analysis", paragraph 1
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: The supported standard deviations are total, inter and intra event, see
#: table 4.a, pages 22-23
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.INTER_EVENT,
const.StdDev.INTRA_EVENT,
const.StdDev.TOTAL
])
#: No additional site term is defined
REQUIRES_SITES_PARAMETERS = set()
#: The required rupture parameters are magnitude
REQUIRES_RUPTURE_PARAMETERS = set(('mag',))
#: The required distance parameter is hypocentral distance
REQUIRES_DISTANCES = set(('rhypo',))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
C = self.COEFFS[imt]
C_SIG = self.SIGMA_COEFFS[imt]
mean = (self.get_magnitude_scaling_term(C, rup.mag) +
self.get_distance_scaling_term(C, dists.rhypo))
std_devs = self.get_stddevs(C_SIG, stddev_types, len(dists.rhypo))
#: Mean ground motions initially returned in cm/s/s (for PGA, SA)
#: and cm/s for PGV
if not isinstance(imt, PGV):
# Convert mean from log(cm/s/s) to g
mean = np.log(np.exp(mean) / (100. * g))
return mean, std_devs
def get_magnitude_scaling_term(self, C, mag):
"""
Returns the magnitude scaling term (equation 1)
"""
mval = mag - 3.0
return C['b1'] + C['b2'] * mval + C['b3'] * (mval ** 2.0) +\
C['b4'] * (mval ** 3.0)
def get_distance_scaling_term(self, C, rhyp):
"""
Returns the distance scaling term (equation 1)
"""
rval = rhyp + C['bh']
return C['b5'] * np.log(rval) + C['b6'] * rval
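    # Combining the two terms above, the median model (equation 1), as
    # implemented here, is
    #   ln(Y) = b1 + b2*(M - 3) + b3*(M - 3)^2 + b4*(M - 3)^3
    #           + b5*ln(Rhypo + bh) + b6*(Rhypo + bh)
    # with Y in cm/s/s (PGA, SA) or cm/s (PGV) before the unit conversion
    # applied in `get_mean_and_stddevs`.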
def get_stddevs(self, C_SIG, stddev_types, num_sites):
"""
Returns the standard deviations
N.B. In the paper, and with confirmation from the author, the
aleatory variability terms from the empirical model are used in
conjunction with the median coefficients from the stochastic model.
In the empirical model, coefficients for a single-station intra-event
sigma are derived. These are labeled as "phi". Inter-event coefficients
corresponding to two observed geothermal sequences (Soultz-Sous-Forets
and Basel) are also derived. The inter-event standard deviation is
therefore taken as the ordinary mean of the two inter-event
sigma terms
"""
stddevs = []
intra = C_SIG['phi']
inter = (C_SIG['tau_s'] + C_SIG['tau_b']) / 2.0
total = sqrt(intra ** 2.0 + inter ** 2.0)
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
stddevs.append(total + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(inter + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(intra + np.zeros(num_sites))
return stddevs
# IMT > 0.5 seconds removed from the present implementation
# For median values, PGA is assumed equivalent to Sa (0.005 s)
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.911899 1.898031 -0.219505 0.021594 -1.329907 -0.016893 0.000000 0.272453
PGA 3.691836 1.401122 -0.219469 0.032238 -1.650703 -0.023499 0.110000 0.302012
0.005000 3.691836 1.401122 -0.219469 0.032238 -1.650703 -0.023499 0.110000 0.302012
0.010000 4.088129 1.396525 -0.212368 0.029869 -1.837709 -0.016626 0.000000 0.320983
0.020000 5.931361 1.372185 -0.197027 0.025748 -2.482646 -0.002140 0.950000 0.346455
0.030000 7.088704 1.335125 -0.182551 0.023845 -2.730126 -0.005237 2.010000 0.332423
0.040000 6.338391 1.291277 -0.174672 0.027038 -2.290429 -0.024085 1.810000 0.282412
0.050000 5.263431 1.250568 -0.178681 0.035678 -1.800646 -0.039319 1.160000 0.220561
0.075000 4.001717 1.220868 -0.222160 0.059438 -1.270830 -0.047952 0.350000 0.115105
0.100000 3.534052 1.274291 -0.269713 0.070710 -1.129130 -0.043285 0.140000 0.072277
0.150000 2.999563 1.453761 -0.336446 0.069763 -1.047842 -0.033311 0.010000 0.047346
0.200000 2.643874 1.642154 -0.373914 0.055965 -1.040540 -0.026098 0.000000 0.051645
0.300000 2.084980 1.965771 -0.401441 0.021288 -1.059220 -0.017376 0.000000 0.073924
0.400000 1.625988 2.209726 -0.398218 -0.008625 -1.086292 -0.012415 0.000000 0.092529
0.500000 1.238325 2.390165 -0.382327 -0.030766 -1.110481 -0.009430 0.000000 0.107816
""")
SIGMA_COEFFS = CoeffsTable(sa_damping=5, table="""
IMT phi tau_s tau_b
pgv 0.53545879 0.65762034 0.55823845
pga 0.57602321 0.90206692 0.63679205
0.0050 0.57602321 0.90206692 0.63679205
0.0100 0.57602321 0.90206692 0.63679205
0.0104 0.57689520 0.90103091 0.63516156
0.0108 0.57783868 0.89116278 0.63196573
0.0112 0.57908780 0.87922269 0.62955373
0.0116 0.58103639 0.86723876 0.62733527
0.0121 0.58375210 0.85574873 0.62533168
0.0126 0.58662287 0.84854712 0.62395612
0.0130 0.58949877 0.84463184 0.62280841
0.0136 0.59226021 0.84360137 0.62191123
0.0141 0.59452817 0.84469266 0.62127164
0.0146 0.59719947 0.84835823 0.62133721
0.0152 0.60028971 0.85435847 0.62185373
0.0158 0.60362608 0.86262581 0.62244475
0.0164 0.60729815 0.87254971 0.62366185
0.0170 0.61179020 0.88519712 0.62549912
0.0177 0.61694496 0.90037312 0.62785447
0.0184 0.62248778 0.91757430 0.63166420
0.0191 0.62856213 0.93643339 0.63664928
0.0198 0.63519785 0.95313131 0.64157563
0.0206 0.64030966 0.97297199 0.64821970
0.0214 0.64621927 0.98848791 0.65515479
0.0222 0.65175226 0.99862058 0.66084582
0.0231 0.65698191 0.99593666 0.66425118
0.0240 0.66244820 0.98019441 0.66579263
0.0249 0.66842953 0.96775821 0.66790592
0.0259 0.67341753 0.96397265 0.67111507
0.0269 0.67807259 0.95589014 0.67574265
0.0279 0.68314489 0.94744882 0.68205857
0.0290 0.68760782 0.94059228 0.68860503
0.0301 0.69337407 0.93669825 0.69561266
0.0313 0.69831040 0.93197705 0.70264339
0.0325 0.70284223 0.92599937 0.71146997
0.0337 0.70700450 0.91851520 0.71952427
0.0350 0.71188544 0.90651575 0.72670161
0.0364 0.71608136 0.89308687 0.73385972
0.0378 0.71825235 0.88306961 0.74283159
0.0393 0.71758703 0.87559117 0.75090906
0.0408 0.71556058 0.86479011 0.75939834
0.0424 0.71317989 0.84854056 0.76688850
0.0440 0.71032003 0.83184489 0.77087696
0.0457 0.70305223 0.81781543 0.77179373
0.0475 0.69566273 0.80345026 0.76990948
0.0493 0.68796458 0.78437968 0.76712272
0.0513 0.68056087 0.75940460 0.76354482
0.0532 0.67268550 0.73333365 0.75718439
0.0553 0.66257190 0.70820063 0.75383256
0.0574 0.65076968 0.68528905 0.74765620
0.0597 0.63769895 0.66285926 0.73605336
0.0620 0.62804828 0.63826332 0.72269728
0.0644 0.61769784 0.61642661 0.71055894
0.0669 0.60678498 0.59579093 0.69844462
0.0695 0.59586816 0.58030542 0.68934140
0.0722 0.58258386 0.56640014 0.68200191
0.0750 0.57090572 0.55440356 0.67200412
0.0779 0.55972340 0.54483417 0.66070232
0.0809 0.54989996 0.53656315 0.65087230
0.0840 0.54218255 0.52645499 0.63999870
0.0873 0.53296740 0.51733857 0.62434404
0.0906 0.52298265 0.50745354 0.60940104
0.0941 0.51472294 0.49884621 0.59478133
0.0978 0.50819820 0.49137806 0.57445883
0.1016 0.50224816 0.48410339 0.55462523
0.1055 0.49877218 0.47243431 0.53473527
0.1096 0.49429293 0.46249198 0.51691791
0.1138 0.48943841 0.45880771 0.50033901
0.1183 0.48311183 0.45618579 0.48086407
0.1228 0.47694259 0.45337171 0.45851082
0.1276 0.47249189 0.45057534 0.43827336
0.1325 0.46703351 0.44524279 0.41509572
0.1377 0.46067205 0.44242663 0.39124108
0.1430 0.45466430 0.44466659 0.36692056
0.1485 0.45042283 0.43986274 0.34495155
0.1543 0.44817331 0.43876880 0.31754765
0.1603 0.44238566 0.41177095 0.29155521
0.1665 0.43701439 0.41028471 0.27984172
0.1729 0.43321154 0.38811549 0.27612026
0.1796 0.43021356 0.38221438 0.26793366
0.1866 0.42772176 0.37672571 0.26812502
0.1938 0.42501528 0.37275559 0.26418376
0.2013 0.42682516 0.37548950 0.26481937
0.2091 0.42487345 0.39198400 0.27158879
0.2172 0.42496052 0.38891404 0.27302182
0.2256 0.42359637 0.38854359 0.28490270
0.2344 0.42421801 0.38753753 0.29370418
0.2435 0.42432813 0.36517965 0.30141728
0.2529 0.42269786 0.36397903 0.31056065
0.2627 0.42148251 0.36956366 0.31864898
0.2729 0.41637955 0.37677653 0.32300185
0.2834 0.41748144 0.38346451 0.33456689
0.2944 0.41965081 0.38089013 0.34241338
0.3058 0.42104089 0.37814987 0.35063710
0.3177 0.42270009 0.34736191 0.36544202
0.3300 0.42334190 0.35085452 0.37534349
0.3428 0.42847146 0.34871270 0.36351831
0.3560 0.42568535 0.36172585 0.40147718
0.3698 0.43095089 0.36448882 0.38073027
0.3841 0.42794813 0.37338013 0.41080089
0.3990 0.43214173 0.38505632 0.42015150
0.4145 0.43634237 0.37807826 0.40857767
0.4305 0.43677233 0.38083883 0.42110004
0.4472 0.43246639 0.38896171 0.42343771
0.4825 0.41364167 0.39946754 0.43467297
0.5012 0.41676790 0.40295239 0.43749117
""")
class DouglasEtAl2013StochasticSD001Q200K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 200 - Kappa 0.02
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.386232 2.002569 -0.243693 0.021568 -1.235992 -0.017009 0.000000 0.221558
PGA 2.514236 1.514284 -0.254728 0.034245 -1.368549 -0.026112 0.040000 0.251214
0.005000 2.514236 1.514284 -0.254728 0.034245 -1.368549 -0.026112 0.040000 0.251214
0.010000 2.526405 1.516447 -0.252939 0.033405 -1.373784 -0.026004 0.060000 0.248973
0.020000 2.845356 1.504545 -0.240064 0.029654 -1.498791 -0.022700 0.090000 0.277776
0.030000 3.648650 1.469251 -0.220269 0.025622 -1.754011 -0.018753 0.490000 0.310281
0.040000 4.099641 1.419897 -0.204483 0.025538 -1.813257 -0.022844 0.800000 0.300761
0.050000 4.017106 1.366809 -0.198300 0.030874 -1.672333 -0.031036 0.770000 0.259098
0.075000 3.432272 1.286782 -0.225641 0.054608 -1.302754 -0.041710 0.370000 0.145882
0.100000 3.095306 1.308094 -0.270743 0.068599 -1.145346 -0.040538 0.170000 0.085920
0.150000 2.689339 1.467746 -0.338665 0.069757 -1.048710 -0.032642 0.040000 0.046990
0.200000 2.370813 1.651499 -0.377802 0.056938 -1.023793 -0.026355 0.000000 0.045030
0.300000 1.859320 1.974641 -0.408833 0.023401 -1.029673 -0.018122 0.000000 0.058825
0.400000 1.414448 2.222739 -0.408549 -0.006291 -1.047952 -0.013325 0.000000 0.069394
0.500000 1.031582 2.409070 -0.394672 -0.028883 -1.066816 -0.010357 0.000000 0.078167
""")
class DouglasEtAl2013StochasticSD001Q200K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 200 - Kappa 0.04
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.788106 2.115736 -0.262747 0.018942 -1.183227 -0.016038 0.000000 0.182817
PGA 1.730746 1.641503 -0.286487 0.033214 -1.253266 -0.024278 0.000000 0.210349
0.005000 1.730746 1.641503 -0.286487 0.033214 -1.253266 -0.024278 0.000000 0.210349
0.010000 1.690656 1.645102 -0.285898 0.032735 -1.238134 -0.024761 0.000000 0.205189
0.020000 1.712960 1.642704 -0.281724 0.031506 -1.244281 -0.024784 0.010000 0.207743
0.030000 1.911187 1.628613 -0.268113 0.027751 -1.313453 -0.023501 0.050000 0.233174
0.040000 2.262531 1.592429 -0.248109 0.024380 -1.407567 -0.023393 0.190000 0.262727
0.050000 2.505791 1.538042 -0.231744 0.025439 -1.427319 -0.026479 0.290000 0.263940
0.075000 2.603442 1.407264 -0.233098 0.045395 -1.288275 -0.034832 0.270000 0.183375
0.100000 2.508251 1.373744 -0.270432 0.063396 -1.160310 -0.036461 0.170000 0.110077
0.150000 2.277599 1.491856 -0.338778 0.068431 -1.050338 -0.031594 0.050000 0.051968
0.200000 2.039725 1.665845 -0.379240 0.056574 -1.016114 -0.026178 0.000000 0.044308
0.300000 1.610954 1.985064 -0.412594 0.023940 -1.014323 -0.018377 0.000000 0.054123
0.400000 1.197240 2.234736 -0.414919 -0.005173 -1.025421 -0.013737 0.000000 0.059470
0.500000 0.826458 2.425072 -0.403468 -0.027633 -1.038408 -0.010818 0.000000 0.061644
""")
class DouglasEtAl2013StochasticSD001Q200K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 200 - Kappa 0.06
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -2.094492 2.210213 -0.273918 0.015167 -1.151849 -0.015183 0.000000 0.157734
PGA 1.221953 1.750749 -0.308085 0.030116 -1.207025 -0.022117 0.000000 0.183270
0.005000 1.221953 1.750749 -0.308085 0.030116 -1.207025 -0.022117 0.000000 0.183270
0.010000 1.175258 1.754990 -0.307949 0.029748 -1.189989 -0.022613 0.000000 0.177373
0.020000 1.152647 1.754891 -0.306030 0.029228 -1.179664 -0.023028 0.000000 0.175078
0.030000 1.179156 1.749522 -0.300167 0.027713 -1.186448 -0.023093 0.000000 0.181042
0.040000 1.314236 1.732339 -0.286382 0.024460 -1.227136 -0.022806 0.020000 0.203709
0.050000 1.515464 1.695458 -0.268091 0.022288 -1.269403 -0.023695 0.080000 0.226609
0.075000 1.806574 1.552540 -0.246343 0.034953 -1.234426 -0.029987 0.130000 0.202406
0.100000 1.910935 1.465001 -0.270167 0.055562 -1.155550 -0.032674 0.120000 0.134368
0.150000 1.873662 1.523761 -0.336936 0.066218 -1.053210 -0.030297 0.050000 0.060015
0.200000 1.722087 1.682213 -0.378975 0.055713 -1.013127 -0.025800 0.000000 0.045528
0.300000 1.385170 1.995187 -0.413693 0.023498 -1.008733 -0.018330 0.000000 0.053691
0.400000 1.013212 2.244722 -0.417310 -0.005358 -1.017300 -0.013748 0.000000 0.057483
0.500000 0.659877 2.436691 -0.407553 -0.027523 -1.025821 -0.010902 0.000000 0.056268
""")
class DouglasEtAl2013StochasticSD001Q600K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 600 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.018152 1.715318 -0.175091 0.017816 -1.216500 -0.011993 0.000000 0.181752
PGA 3.589080 1.206609 -0.155223 0.024240 -1.401654 -0.019345 0.110000 0.190322
0.005000 3.589080 1.206609 -0.155223 0.024240 -1.401654 -0.019345 0.110000 0.190322
0.010000 4.911415 1.188752 -0.138552 0.018628 -1.869825 -0.008674 0.590000 0.220320
0.020000 5.191928 1.135551 -0.109690 0.011910 -1.699357 -0.026272 0.840000 0.183679
0.030000 4.424745 1.090555 -0.111595 0.020546 -1.299011 -0.036744 0.280000 0.117423
0.040000 4.140453 1.078605 -0.132558 0.033221 -1.178108 -0.034707 0.130000 0.086542
0.050000 3.979718 1.086974 -0.157835 0.044612 -1.132899 -0.030961 0.090000 0.072361
0.075000 3.678728 1.148153 -0.218371 0.064142 -1.084879 -0.023978 0.060000 0.054940
0.100000 3.429408 1.236027 -0.267416 0.072596 -1.062416 -0.019770 0.050000 0.043964
0.150000 2.980985 1.434977 -0.333077 0.069592 -1.031349 -0.015240 0.000000 0.037687
0.200000 2.636753 1.628285 -0.368781 0.054682 -1.032007 -0.012047 0.000000 0.048223
0.300000 2.076595 1.952199 -0.392267 0.018512 -1.050101 -0.008015 0.000000 0.072718
0.400000 1.611798 2.190614 -0.385264 -0.011874 -1.072593 -0.005658 0.000000 0.089592
0.500000 1.217886 2.362928 -0.366572 -0.033591 -1.091887 -0.004292 0.000000 0.101936
""")
class DouglasEtAl2013StochasticSD001Q600K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 600 - Kappa 0.020
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.474885 1.853039 -0.216446 0.022361 -1.159570 -0.010882 0.000000 0.143047
PGA 2.342749 1.348400 -0.210078 0.032118 -1.205713 -0.017276 0.000000 0.154153
0.005000 2.342749 1.348400 -0.210078 0.032118 -1.205713 -0.017276 0.000000 0.154153
0.010000 2.338170 1.348362 -0.206283 0.030567 -1.198439 -0.017789 0.000000 0.151975
0.020000 2.706148 1.321070 -0.178654 0.021194 -1.287942 -0.018291 0.090000 0.183344
0.030000 3.016630 1.247722 -0.147054 0.017290 -1.278445 -0.023509 0.160000 0.168414
0.040000 3.092182 1.180248 -0.146904 0.027996 -1.200855 -0.026782 0.120000 0.123757
0.050000 3.113437 1.150349 -0.164788 0.040982 -1.147124 -0.026756 0.090000 0.093016
0.075000 3.078805 1.174853 -0.221704 0.063057 -1.088814 -0.022760 0.070000 0.059778
0.100000 2.966859 1.252330 -0.270653 0.072523 -1.061810 -0.019288 0.060000 0.044742
0.150000 2.661828 1.444789 -0.337856 0.070850 -1.027668 -0.015126 0.020000 0.033863
0.200000 2.363970 1.636500 -0.376007 0.057180 -1.015779 -0.012390 0.000000 0.040007
0.300000 1.853490 1.962959 -0.404872 0.022695 -1.023575 -0.008562 0.000000 0.056907
0.400000 1.405774 2.209079 -0.402015 -0.007610 -1.039558 -0.006239 0.000000 0.067075
0.500000 1.018525 2.390841 -0.385720 -0.030343 -1.054876 -0.004861 0.000000 0.074311
""")
class DouglasEtAl2013StochasticSD001Q600K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 600 - Kappa 0.040
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.849385 1.993331 -0.247386 0.022239 -1.131444 -0.009860 0.000000 0.118667
PGA 1.632396 1.500688 -0.256860 0.035012 -1.163818 -0.014136 0.000000 0.130186
0.005000 1.632396 1.500688 -0.256860 0.035012 -1.163818 -0.014136 0.000000 0.130186
0.010000 1.590524 1.503304 -0.255601 0.034326 -1.146756 -0.014684 0.000000 0.124409
0.020000 1.601125 1.497411 -0.247909 0.031791 -1.142438 -0.015191 0.000000 0.126477
0.030000 1.734189 1.470071 -0.220930 0.023079 -1.158830 -0.016597 0.000000 0.147039
0.040000 1.895940 1.400327 -0.190818 0.020210 -1.149369 -0.019357 0.000000 0.146678
0.050000 2.058032 1.319473 -0.184034 0.030082 -1.132438 -0.020935 0.020000 0.123518
0.075000 2.312639 1.243607 -0.224363 0.058026 -1.091850 -0.020564 0.060000 0.072872
0.100000 2.369735 1.287114 -0.272167 0.070363 -1.062386 -0.018425 0.060000 0.049408
0.150000 2.249018 1.461465 -0.340081 0.070421 -1.026095 -0.014902 0.030000 0.033795
0.200000 2.033724 1.648897 -0.379300 0.057443 -1.007859 -0.012485 0.000000 0.038504
0.300000 1.606988 1.974347 -0.411268 0.024159 -1.009716 -0.008804 0.000000 0.052462
0.400000 1.192377 2.224049 -0.412193 -0.005299 -1.020254 -0.006499 0.000000 0.057828
0.500000 0.819211 2.412271 -0.399162 -0.027965 -1.031211 -0.005096 0.000000 0.059291
""")
class DouglasEtAl2013StochasticSD001Q600K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 600 - Kappa 0.060
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -2.138493 2.106366 -0.265296 0.019400 -1.112992 -0.009315 0.000000 0.103976
PGA 1.148370 1.628251 -0.287747 0.033829 -1.144203 -0.012436 0.000000 0.115490
0.005000 1.148370 1.628251 -0.287747 0.033829 -1.144203 -0.012436 0.000000 0.115490
0.010000 1.100454 1.631890 -0.287275 0.033378 -1.126216 -0.012952 0.000000 0.109143
0.020000 1.075112 1.629870 -0.284137 0.032524 -1.113361 -0.013411 0.000000 0.105730
0.030000 1.094950 1.620201 -0.273619 0.029244 -1.112136 -0.013837 0.000000 0.110630
0.040000 1.183773 1.589289 -0.248480 0.022363 -1.119182 -0.014903 0.000000 0.124711
0.050000 1.298335 1.524158 -0.223210 0.021525 -1.113655 -0.016526 0.000000 0.125700
0.075000 1.600712 1.363323 -0.228312 0.048127 -1.083727 -0.018230 0.020000 0.087996
0.100000 1.797430 1.344676 -0.271421 0.065820 -1.062362 -0.017284 0.050000 0.057198
0.150000 1.843526 1.483017 -0.340243 0.069135 -1.024584 -0.014640 0.030000 0.035033
0.200000 1.716380 1.662307 -0.380380 0.056912 -1.003788 -0.012474 0.000000 0.038419
0.300000 1.382126 1.984756 -0.413535 0.023986 -1.004561 -0.008831 0.000000 0.052283
0.400000 1.009701 2.235505 -0.416269 -0.005047 -1.013269 -0.006494 0.000000 0.056224
0.500000 0.655144 2.426646 -0.405547 -0.027322 -1.020755 -0.005098 0.000000 0.054644
""")
class DouglasEtAl2013StochasticSD001Q1800K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 1800 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.089531 1.630072 -0.146313 0.012334 -1.168735 -0.008155 0.000000 0.124303
PGA 3.388708 1.122579 -0.119078 0.016528 -1.225495 -0.015096 0.000000 0.126719
0.005000 3.388708 1.122579 -0.119078 0.016528 -1.225495 -0.015096 0.000000 0.126719
0.010000 4.343610 1.083149 -0.087678 0.006140 -1.384188 -0.019949 0.280000 0.136126
0.020000 4.203717 1.040558 -0.081142 0.010225 -1.161755 -0.024475 0.030000 0.085515
0.030000 4.121190 1.039103 -0.101964 0.022050 -1.124788 -0.019930 0.020000 0.070847
0.040000 4.023197 1.050014 -0.127977 0.034235 -1.108946 -0.016813 0.030000 0.063947
0.050000 3.919910 1.068474 -0.154822 0.045171 -1.097666 -0.014763 0.040000 0.059170
0.075000 3.669707 1.138406 -0.216292 0.064143 -1.077873 -0.011797 0.060000 0.048840
0.100000 3.422799 1.229220 -0.265269 0.072228 -1.058209 -0.010340 0.050000 0.039674
0.150000 2.975243 1.430294 -0.330036 0.068526 -1.028421 -0.008782 0.000000 0.035051
0.200000 2.629737 1.624149 -0.364343 0.052880 -1.028892 -0.007153 0.000000 0.046455
0.300000 2.065365 1.946322 -0.384991 0.015780 -1.045247 -0.004754 0.000000 0.069751
0.400000 1.596682 2.180485 -0.375923 -0.014639 -1.065822 -0.003215 0.000000 0.083940
0.500000 1.200007 2.347832 -0.356093 -0.035852 -1.083534 -0.002317 0.000000 0.093695
""")
class DouglasEtAl2013StochasticSD001Q1800K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 1800 - Kappa 0.020
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.518957 1.789716 -0.200505 0.020891 -1.136435 -0.007080 0.000000 0.101454
PGA 2.278964 1.281239 -0.187155 0.029012 -1.160774 -0.010135 0.000000 0.106119
0.005000 2.278964 1.281239 -0.187155 0.029012 -1.160774 -0.010135 0.000000 0.106119
0.010000 2.271277 1.279336 -0.181408 0.026653 -1.148104 -0.010825 0.000000 0.102448
0.020000 2.510287 1.231997 -0.139437 0.012738 -1.153744 -0.013661 0.000000 0.113884
0.030000 2.773305 1.144600 -0.121580 0.018206 -1.126277 -0.015656 0.000000 0.090105
0.040000 2.949617 1.107808 -0.137090 0.031775 -1.108997 -0.015022 0.020000 0.072360
0.050000 3.040573 1.105361 -0.160743 0.043794 -1.098533 -0.013808 0.040000 0.063054
0.075000 3.060273 1.157222 -0.220450 0.064070 -1.075989 -0.011541 0.060000 0.049386
0.100000 2.960898 1.242219 -0.269693 0.072949 -1.057246 -0.010197 0.060000 0.039080
0.150000 2.658229 1.439044 -0.336715 0.070825 -1.025484 -0.008759 0.020000 0.031182
0.200000 2.360601 1.631946 -0.374442 0.056871 -1.014045 -0.007510 0.000000 0.038634
0.300000 1.848651 1.958532 -0.402005 0.021836 -1.021364 -0.005251 0.000000 0.055764
0.400000 1.398495 2.202972 -0.397681 -0.008785 -1.036128 -0.003747 0.000000 0.064827
0.500000 1.008973 2.382012 -0.380224 -0.031489 -1.050182 -0.002863 0.000000 0.070363
""")
class DouglasEtAl2013StochasticSD001Q1800K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 1800 - Kappa 0.040
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.876373 1.944053 -0.238715 0.022681 -1.117377 -0.006641 0.000000 0.088373
PGA 1.591976 1.445796 -0.242326 0.034488 -1.141256 -0.008294 0.000000 0.093962
0.005000 1.591976 1.445796 -0.242326 0.034488 -1.141256 -0.008294 0.000000 0.093962
0.010000 1.548842 1.447824 -0.240619 0.033647 -1.123375 -0.008823 0.000000 0.087449
0.020000 1.552707 1.439552 -0.229645 0.029698 -1.113939 -0.009382 0.000000 0.086773
0.030000 1.660404 1.398755 -0.193116 0.018145 -1.113617 -0.010550 0.000000 0.093327
0.040000 1.826075 1.309703 -0.168290 0.021147 -1.102613 -0.011673 0.000000 0.085376
0.050000 2.002537 1.237327 -0.173306 0.034828 -1.091085 -0.012009 0.010000 0.073042
0.075000 2.289213 1.207877 -0.223633 0.060750 -1.073209 -0.011037 0.050000 0.052352
0.100000 2.362841 1.269949 -0.272128 0.071572 -1.055776 -0.009992 0.060000 0.039929
0.150000 2.246155 1.454130 -0.339822 0.070809 -1.023775 -0.008719 0.030000 0.030636
0.200000 2.031745 1.643962 -0.378796 0.057605 -1.006470 -0.007687 0.000000 0.037296
0.300000 1.604800 1.970501 -0.410250 0.024070 -1.008472 -0.005511 0.000000 0.051876
0.400000 1.189085 2.219741 -0.410435 -0.005587 -1.018448 -0.004004 0.000000 0.056958
0.500000 0.814452 2.406676 -0.396598 -0.028355 -1.028546 -0.003097 0.000000 0.057671
""")
class DouglasEtAl2013StochasticSD001Q1800K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 001 - Q 1800 - Kappa 0.060
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -2.156614 2.065720 -0.260325 0.020572 -1.102852 -0.006529 0.000000 0.080242
PGA 1.119820 1.581392 -0.277976 0.034521 -1.129253 -0.007548 0.000000 0.086455
0.005000 1.119820 1.581392 -0.277976 0.034521 -1.129253 -0.007548 0.000000 0.086455
0.010000 1.071372 1.584734 -0.277309 0.034013 -1.110965 -0.008052 0.000000 0.079704
0.020000 1.044234 1.581690 -0.273399 0.032916 -1.097208 -0.008458 0.000000 0.075238
0.030000 1.055709 1.569219 -0.258863 0.027938 -1.091392 -0.008858 0.000000 0.076428
0.040000 1.129287 1.526973 -0.227602 0.019847 -1.090705 -0.009487 0.000000 0.080059
0.050000 1.245606 1.447151 -0.205176 0.022882 -1.084029 -0.010139 0.000000 0.075944
0.075000 1.594773 1.306652 -0.226203 0.052707 -1.071221 -0.010234 0.040000 0.056913
0.100000 1.796256 1.316835 -0.272206 0.068042 -1.056110 -0.009622 0.060000 0.041932
0.150000 1.848211 1.473278 -0.340626 0.069790 -1.024771 -0.008579 0.040000 0.030797
0.200000 1.714875 1.656796 -0.380397 0.057214 -1.002290 -0.007777 0.000000 0.037193
0.300000 1.380683 1.981208 -0.413167 0.024102 -1.003529 -0.005575 0.000000 0.051883
0.400000 1.007776 2.232006 -0.415518 -0.005016 -1.012018 -0.004014 0.000000 0.055722
0.500000 0.652442 2.422519 -0.404333 -0.027354 -1.019019 -0.003103 0.000000 0.053813
""")
class DouglasEtAl2013StochasticSD010Q200K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 200 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.605318 2.239348 -0.247962 0.007715 -1.495398 -0.019604 0.250000 0.324479
PGA 5.875280 1.745388 -0.275130 0.025412 -1.934005 -0.023881 0.550000 0.381508
0.005000 5.875280 1.745388 -0.275130 0.025412 -1.934005 -0.023881 0.550000 0.381508
0.010000 6.217695 1.729050 -0.262333 0.022410 -2.101025 -0.018003 0.290000 0.423520
0.020000 9.158260 1.673934 -0.245104 0.022102 -3.094216 0.002983 2.030000 0.445541
0.030000 9.978339 1.602046 -0.239414 0.030472 -3.143382 -0.007983 3.080000 0.394974
0.040000 8.264174 1.537990 -0.249128 0.044234 -2.363145 -0.033505 2.250000 0.306393
0.050000 6.872820 1.507449 -0.272239 0.057068 -1.786378 -0.048164 1.330000 0.221661
0.075000 5.482143 1.584770 -0.336688 0.064514 -1.270507 -0.051998 0.430000 0.105096
0.100000 4.910161 1.746579 -0.377144 0.051911 -1.138473 -0.045404 0.190000 0.073122
0.150000 4.182565 2.060525 -0.407661 0.017125 -1.061257 -0.034238 0.010000 0.071638
0.200000 3.708136 2.306609 -0.407169 -0.012862 -1.069274 -0.026148 0.000000 0.079692
0.300000 2.981990 2.633960 -0.375320 -0.050843 -1.108363 -0.016665 0.000000 0.097484
0.400000 2.414119 2.821389 -0.333948 -0.067658 -1.145763 -0.011656 0.000000 0.118908
0.500000 2.022012 2.929733 -0.294952 -0.072515 -1.203786 -0.007894 0.080000 0.139949
""")
class DouglasEtAl2013StochasticSD010Q200K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 200 - Kappa 0.020
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.096779 2.383856 -0.266292 0.002110 -1.354216 -0.019738 0.230000 0.245101
PGA 4.374535 1.917336 -0.307134 0.020805 -1.574418 -0.027204 0.450000 0.295194
0.005000 4.374535 1.917336 -0.307134 0.020805 -1.574418 -0.027204 0.450000 0.295194
0.010000 4.425233 1.915916 -0.303907 0.020040 -1.593433 -0.026748 0.510000 0.295897
0.020000 5.022820 1.881192 -0.284724 0.016837 -1.817288 -0.021213 0.670000 0.352047
0.030000 6.182574 1.804058 -0.267546 0.020760 -2.152706 -0.017533 1.390000 0.378696
0.040000 6.307568 1.719413 -0.264235 0.031634 -2.049173 -0.027098 1.520000 0.339448
0.050000 5.833904 1.654281 -0.275591 0.044755 -1.759576 -0.038042 1.190000 0.270470
0.075000 4.919003 1.649480 -0.331893 0.059787 -1.307550 -0.046727 0.510000 0.133370
0.100000 4.457979 1.778000 -0.375302 0.050641 -1.149814 -0.043367 0.250000 0.080117
0.150000 3.852046 2.076912 -0.410452 0.017520 -1.053774 -0.033966 0.070000 0.063501
0.200000 3.376321 2.322466 -0.412791 -0.011989 -1.028408 -0.027215 0.000000 0.062581
0.300000 2.676051 2.657490 -0.384656 -0.050445 -1.048271 -0.018226 0.000000 0.061316
0.400000 2.134605 2.855400 -0.345028 -0.068645 -1.085810 -0.012884 0.030000 0.069769
0.500000 1.736610 2.973288 -0.306396 -0.074972 -1.137275 -0.009075 0.110000 0.085640
""")
class DouglasEtAl2013StochasticSD010Q200K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 200 - Kappa 0.040
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.687488 2.527271 -0.274220 -0.006401 -1.270136 -0.018579 0.200000 0.190232
PGA 3.279024 2.097278 -0.327817 0.011371 -1.389128 -0.026188 0.300000 0.232413
0.005000 3.279024 2.097278 -0.327817 0.011371 -1.389128 -0.026188 0.300000 0.232413
0.010000 3.284303 2.099098 -0.326753 0.011143 -1.391236 -0.026116 0.370000 0.226683
0.020000 3.339767 2.088811 -0.320785 0.010447 -1.408222 -0.025946 0.390000 0.236114
0.030000 3.750529 2.047995 -0.303726 0.009511 -1.545612 -0.023438 0.560000 0.280550
0.040000 4.250578 1.969295 -0.288486 0.015107 -1.658691 -0.024347 0.830000 0.307671
0.050000 4.362890 1.881148 -0.285246 0.026259 -1.597237 -0.030367 0.830000 0.289479
0.075000 4.120221 1.770714 -0.322112 0.049365 -1.323147 -0.040091 0.510000 0.173247
0.100000 3.864185 1.834616 -0.367246 0.047074 -1.167873 -0.040033 0.290000 0.099092
0.150000 3.422233 2.098779 -0.408472 0.016665 -1.050839 -0.033293 0.090000 0.064081
0.200000 3.018385 2.338399 -0.413324 -0.012508 -1.012372 -0.027349 0.000000 0.059582
0.300000 2.386917 2.674846 -0.388335 -0.050871 -1.019597 -0.018788 0.000000 0.049715
0.400000 1.844803 2.879976 -0.351108 -0.069598 -1.039009 -0.013838 0.010000 0.044455
0.500000 1.443464 3.006837 -0.314023 -0.076915 -1.081092 -0.010058 0.090000 0.049779
""")
class DouglasEtAl2013StochasticSD010Q200K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 200 - Kappa 0.060
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.097634 2.637309 -0.274444 -0.014043 -1.231086 -0.017202 0.210000 0.157031
PGA 2.578048 2.240621 -0.336572 0.001667 -1.308454 -0.024230 0.240000 0.193963
0.005000 2.578048 2.240621 -0.336572 0.001667 -1.308454 -0.024230 0.240000 0.193963
0.010000 2.562845 2.243931 -0.336075 0.001470 -1.303756 -0.024302 0.300000 0.186931
0.020000 2.569763 2.239660 -0.334000 0.001646 -1.303550 -0.024424 0.340000 0.185469
0.030000 2.642638 2.222963 -0.326549 0.001279 -1.325062 -0.024235 0.350000 0.200132
0.040000 2.921247 2.177306 -0.312293 0.002562 -1.405149 -0.023560 0.460000 0.234883
0.050000 3.194821 2.101826 -0.300431 0.009233 -1.445578 -0.025567 0.570000 0.255596
0.075000 3.305510 1.926738 -0.313200 0.035011 -1.298833 -0.034756 0.430000 0.199228
0.100000 3.255483 1.913197 -0.355178 0.041352 -1.172368 -0.036714 0.280000 0.121239
0.150000 3.004214 2.123610 -0.403630 0.015702 -1.051662 -0.032377 0.100000 0.067149
0.200000 2.688200 2.353180 -0.411302 -0.013331 -1.007721 -0.027087 0.000000 0.059876
0.300000 2.156829 2.686815 -0.387751 -0.052032 -1.017363 -0.018479 0.000000 0.050439
0.400000 1.641840 2.894441 -0.351882 -0.070909 -1.029199 -0.013671 0.000000 0.040298
0.500000 1.182235 3.026362 -0.316201 -0.078545 -1.036206 -0.010984 0.000000 0.036532
""")
class DouglasEtAl2013StochasticSD010Q600K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 600 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.399226 1.976778 -0.208627 0.015319 -1.298302 -0.015378 0.130000 0.223024
PGA 5.552878 1.438059 -0.208984 0.029624 -1.555031 -0.020992 0.400000 0.244204
0.005000 5.552878 1.438059 -0.208984 0.029624 -1.555031 -0.020992 0.400000 0.244204
0.010000 7.303571 1.395409 -0.177926 0.020530 -2.157182 -0.008337 1.160000 0.298737
0.020000 7.023987 1.276633 -0.153224 0.027707 -1.745299 -0.032132 1.060000 0.206642
0.030000 6.188354 1.223851 -0.189734 0.052538 -1.342493 -0.039370 0.450000 0.117224
0.040000 5.858554 1.253123 -0.235363 0.067220 -1.221775 -0.036003 0.280000 0.080921
0.050000 5.650634 1.316139 -0.274299 0.073340 -1.174988 -0.031743 0.230000 0.063701
0.075000 5.218693 1.512609 -0.341146 0.069154 -1.119276 -0.024434 0.170000 0.049166
0.100000 4.823678 1.708044 -0.377812 0.053403 -1.082143 -0.020453 0.100000 0.052826
0.150000 4.184307 2.037115 -0.403919 0.016870 -1.050870 -0.015655 0.010000 0.067581
0.200000 3.711971 2.283654 -0.400351 -0.013652 -1.060171 -0.012098 0.000000 0.078407
0.300000 2.982740 2.601639 -0.364046 -0.051219 -1.093085 -0.007740 0.000000 0.096058
0.400000 2.426178 2.775209 -0.320150 -0.066530 -1.129523 -0.005234 0.020000 0.114668
0.500000 2.002393 2.870147 -0.280178 -0.069583 -1.169527 -0.003495 0.070000 0.132142
""")
class DouglasEtAl2013StochasticSD010Q600K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 600 - Kappa 0.02
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.277203 2.185642 -0.250640 0.012533 -1.213880 -0.013574 0.100000 0.159635
PGA 3.959118 1.671948 -0.271802 0.031204 -1.286048 -0.019660 0.160000 0.179971
0.005000 3.959118 1.671948 -0.271802 0.031204 -1.286048 -0.019660 0.160000 0.179971
0.010000 4.017162 1.665213 -0.264841 0.029351 -1.301096 -0.019577 0.230000 0.181727
0.020000 4.563020 1.588633 -0.220228 0.020149 -1.427232 -0.020657 0.440000 0.231305
0.030000 4.769998 1.441805 -0.205692 0.036515 -1.348874 -0.027225 0.410000 0.182017
0.040000 4.794690 1.373421 -0.236283 0.057856 -1.252219 -0.029232 0.320000 0.120403
0.050000 4.768092 1.386150 -0.273635 0.068505 -1.191185 -0.028265 0.260000 0.083776
0.075000 4.597890 1.542450 -0.342634 0.068098 -1.119271 -0.023541 0.190000 0.051075
0.100000 4.352686 1.728234 -0.381417 0.053625 -1.081155 -0.020010 0.140000 0.049248
0.150000 3.833814 2.054402 -0.411632 0.018525 -1.036691 -0.015778 0.050000 0.057232
0.200000 3.376930 2.304931 -0.411897 -0.011399 -1.021748 -0.012950 0.000000 0.059595
0.300000 2.677023 2.638076 -0.380831 -0.049952 -1.040550 -0.008698 0.000000 0.059030
0.400000 2.148759 2.828242 -0.338913 -0.067566 -1.079788 -0.005782 0.050000 0.066852
0.500000 1.738307 2.936455 -0.298828 -0.072794 -1.121944 -0.003786 0.120000 0.081141
""")
class DouglasEtAl2013StochasticSD010Q600K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 600 - Kappa 0.04
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.789537 2.379491 -0.271641 0.003383 -1.184514 -0.011619 0.120000 0.123741
PGA 3.038850 1.905216 -0.312136 0.023319 -1.222269 -0.016180 0.120000 0.141692
0.005000 3.038850 1.905216 -0.312136 0.023319 -1.222269 -0.016180 0.120000 0.141692
0.010000 3.023028 1.904740 -0.310129 0.022996 -1.214832 -0.016406 0.170000 0.135111
0.020000 3.087900 1.885074 -0.297838 0.020616 -1.226169 -0.016693 0.210000 0.144902
0.030000 3.306016 1.803091 -0.261877 0.017577 -1.249219 -0.018916 0.250000 0.173652
0.040000 3.492669 1.665705 -0.248320 0.033194 -1.220502 -0.022021 0.240000 0.155980
0.050000 3.648336 1.575797 -0.267915 0.051968 -1.187511 -0.023153 0.230000 0.119504
0.075000 3.797474 1.607881 -0.336743 0.063623 -1.120779 -0.021884 0.190000 0.062318
0.100000 3.743565 1.761896 -0.379258 0.051789 -1.083615 -0.019261 0.160000 0.051151
0.150000 3.397650 2.073851 -0.412679 0.017806 -1.029702 -0.015741 0.060000 0.056234
0.200000 3.019391 2.322874 -0.414958 -0.011765 -1.006630 -0.013207 0.000000 0.056985
0.300000 2.387884 2.661951 -0.387985 -0.050139 -1.015168 -0.009072 0.000000 0.047948
0.400000 1.852708 2.864039 -0.349409 -0.068621 -1.035729 -0.006484 0.020000 0.042075
0.500000 1.448716 2.985416 -0.311157 -0.075406 -1.073942 -0.004340 0.100000 0.046815
""")
class DouglasEtAl2013StochasticSD010Q600K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 600 - Kappa 0.06
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.157299 2.521549 -0.277618 -0.005771 -1.173632 -0.010296 0.160000 0.103647
PGA 2.426138 2.085137 -0.330841 0.012940 -1.198333 -0.014021 0.120000 0.120614
0.005000 2.426138 2.085137 -0.330841 0.012940 -1.198333 -0.014021 0.120000 0.120614
0.010000 2.400702 2.087152 -0.329965 0.012770 -1.189374 -0.014202 0.170000 0.112920
0.020000 2.395202 2.078493 -0.326412 0.012920 -1.182158 -0.014496 0.200000 0.109869
0.030000 2.445459 2.048721 -0.311341 0.011013 -1.184271 -0.015197 0.200000 0.122909
0.040000 2.579776 1.963805 -0.284702 0.013228 -1.185542 -0.017070 0.200000 0.139326
0.050000 2.728047 1.844045 -0.275243 0.027647 -1.167326 -0.018984 0.190000 0.130413
0.075000 3.042038 1.719669 -0.324075 0.054696 -1.120435 -0.019803 0.180000 0.077569
0.100000 3.137381 1.810541 -0.371739 0.048885 -1.080864 -0.018494 0.150000 0.055346
0.150000 2.981524 2.093499 -0.410666 0.016837 -1.028456 -0.015529 0.070000 0.056141
0.200000 2.690148 2.337571 -0.414405 -0.012744 -1.001874 -0.013184 0.000000 0.057222
0.300000 2.157144 2.676639 -0.388658 -0.051411 -1.013559 -0.008795 0.000000 0.049033
0.400000 1.640818 2.883541 -0.351944 -0.070088 -1.024134 -0.006368 0.000000 0.038112
0.500000 1.188612 3.012661 -0.315520 -0.077446 -1.032666 -0.005083 0.010000 0.033828
""")
class DouglasEtAl2013StochasticSD010Q1800K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
    Stress Drop 010 - Q 1800 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.270388 1.840800 -0.173459 0.013618 -1.217478 -0.010519 0.070000 0.144994
PGA 5.210924 1.292567 -0.160512 0.024235 -1.317580 -0.016126 0.180000 0.149861
0.005000 5.210924 1.292567 -0.160512 0.024235 -1.317580 -0.016126 0.180000 0.149861
0.010000 6.212053 1.203080 -0.105015 0.009911 -1.463246 -0.022325 0.490000 0.155971
0.020000 6.012874 1.127925 -0.131939 0.035733 -1.226591 -0.024736 0.200000 0.083440
0.030000 5.893671 1.156908 -0.185876 0.057161 -1.183315 -0.019867 0.180000 0.066813
0.040000 5.757238 1.218068 -0.233651 0.069310 -1.164147 -0.016652 0.190000 0.056752
0.050000 5.594412 1.293592 -0.272710 0.074360 -1.143826 -0.014781 0.180000 0.049030
0.075000 5.194722 1.500166 -0.338908 0.069080 -1.106676 -0.012170 0.150000 0.042578
0.100000 4.817394 1.698307 -0.374583 0.052713 -1.077829 -0.010853 0.100000 0.049220
0.150000 4.177675 2.027663 -0.398342 0.015300 -1.046479 -0.009150 0.010000 0.064945
0.200000 3.702838 2.271508 -0.392561 -0.015579 -1.054285 -0.007221 0.000000 0.074407
0.300000 2.969130 2.581006 -0.353443 -0.052654 -1.084101 -0.004489 0.000000 0.087899
0.400000 2.401649 2.745922 -0.308500 -0.066850 -1.114613 -0.002864 0.010000 0.103110
0.500000 1.976276 2.834255 -0.268448 -0.068982 -1.152828 -0.001530 0.060000 0.118231
""")
class DouglasEtAl2013StochasticSD010Q1800K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 1800 - Kappa 0.02
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.328874 2.093100 -0.236729 0.015454 -1.182377 -0.008473 0.080000 0.108501
PGA 3.826758 1.563844 -0.247996 0.032719 -1.209190 -0.011652 0.080000 0.115103
0.005000 3.826758 1.563844 -0.247996 0.032719 -1.209190 -0.011652 0.080000 0.115103
0.010000 3.867316 1.552800 -0.236514 0.029180 -1.212293 -0.011958 0.140000 0.113953
0.020000 4.192960 1.425188 -0.174651 0.019976 -1.221323 -0.015210 0.180000 0.124076
0.030000 4.493093 1.291756 -0.192799 0.047693 -1.187651 -0.016284 0.180000 0.087425
0.040000 4.651621 1.287639 -0.236539 0.065028 -1.164574 -0.015190 0.190000 0.065066
0.050000 4.694163 1.337759 -0.275529 0.072278 -1.145164 -0.013964 0.190000 0.052351
0.075000 4.584276 1.524234 -0.343354 0.069212 -1.109348 -0.011766 0.180000 0.041097
0.100000 4.349370 1.717243 -0.381367 0.054140 -1.077856 -0.010627 0.140000 0.045250
0.150000 3.831766 2.046878 -0.410685 0.018671 -1.034827 -0.009263 0.050000 0.055689
0.200000 3.374756 2.297556 -0.410105 -0.011449 -1.019820 -0.007977 0.000000 0.058145
0.300000 2.673029 2.627803 -0.377335 -0.050075 -1.037184 -0.005346 0.000000 0.056193
0.400000 2.142555 2.812981 -0.334171 -0.067283 -1.074526 -0.003251 0.050000 0.061894
0.500000 1.730373 2.916070 -0.293558 -0.071862 -1.115004 -0.001721 0.120000 0.074430
""")
class DouglasEtAl2013StochasticSD010Q1800K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 1800 - Kappa 0.04
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.804310 2.314662 -0.267297 0.007093 -1.171683 -0.007235 0.120000 0.089725
PGA 2.983275 1.824241 -0.301354 0.027162 -1.189682 -0.009263 0.090000 0.097007
0.005000 2.983275 1.824241 -0.301354 0.027162 -1.189682 -0.009263 0.090000 0.097007
0.010000 2.965410 1.822366 -0.298627 0.026695 -1.181314 -0.009444 0.140000 0.089042
0.020000 2.999099 1.795352 -0.279256 0.021952 -1.176732 -0.010069 0.160000 0.092881
0.030000 3.158499 1.677095 -0.234751 0.021129 -1.170522 -0.011700 0.160000 0.097854
0.040000 3.390475 1.532751 -0.238059 0.044042 -1.157034 -0.012455 0.170000 0.082131
0.050000 3.587686 1.479256 -0.269543 0.060799 -1.142415 -0.012413 0.180000 0.064258
0.075000 3.785946 1.574919 -0.340568 0.066008 -1.107611 -0.011351 0.180000 0.043294
0.100000 3.734359 1.746595 -0.381165 0.052706 -1.076366 -0.010463 0.150000 0.045210
0.150000 3.397181 2.066709 -0.413158 0.018179 -1.028223 -0.009369 0.060000 0.055059
0.200000 3.019110 2.317323 -0.414922 -0.011478 -1.005578 -0.008296 0.000000 0.056311
0.300000 2.387054 2.656172 -0.387233 -0.049876 -1.013793 -0.005718 0.000000 0.046995
0.400000 1.850819 2.856136 -0.347922 -0.068234 -1.033281 -0.003952 0.020000 0.040144
0.500000 1.445371 2.974479 -0.309067 -0.074729 -1.070102 -0.002317 0.100000 0.043841
""")
class DouglasEtAl2013StochasticSD010Q1800K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 010 - Q 1800 - Kappa 0.06
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -1.164129 2.472701 -0.277072 -0.002441 -1.165144 -0.006598 0.160000 0.078713
PGA 2.401375 2.021365 -0.326031 0.017062 -1.181478 -0.008100 0.110000 0.087220
0.005000 2.401375 2.021365 -0.326031 0.017062 -1.181478 -0.008100 0.110000 0.087220
0.010000 2.383071 2.022707 -0.324905 0.016869 -1.175527 -0.008140 0.170000 0.078838
0.020000 2.366778 2.011492 -0.320272 0.016913 -1.163682 -0.008467 0.190000 0.073975
0.030000 2.394725 1.971296 -0.298124 0.013341 -1.155032 -0.009180 0.180000 0.079139
0.040000 2.503078 1.860052 -0.268241 0.018555 -1.144001 -0.010278 0.160000 0.080541
0.050000 2.675186 1.733416 -0.268437 0.037274 -1.132902 -0.010845 0.160000 0.071537
0.075000 3.031875 1.665585 -0.329913 0.059259 -1.104804 -0.010761 0.170000 0.047240
0.100000 3.137839 1.787652 -0.375751 0.050269 -1.075381 -0.010220 0.150000 0.045421
0.150000 2.989434 2.085540 -0.412033 0.017203 -1.029966 -0.009215 0.080000 0.054873
0.200000 2.690545 2.332560 -0.414978 -0.012490 -1.000968 -0.008353 0.000000 0.056782
0.300000 2.156812 2.672552 -0.388704 -0.051138 -1.012544 -0.005478 0.000000 0.048499
0.400000 1.639836 2.878667 -0.351628 -0.069741 -1.022513 -0.003854 0.000000 0.037069
0.500000 1.187019 3.006206 -0.314832 -0.076964 -1.030241 -0.003068 0.010000 0.032154
""")
class DouglasEtAl2013StochasticSD100Q200K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 200 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 2.324839 2.605780 -0.245000 -0.010095 -1.848593 -0.015707 0.770000 0.343939
PGA 8.158879 2.173797 -0.303404 0.005287 -2.363083 -0.019970 1.090000 0.428530
0.005000 8.158879 2.173797 -0.303404 0.005287 -2.363083 -0.019970 1.090000 0.428530
0.010000 8.853782 2.134252 -0.287264 0.004035 -2.668435 -0.010186 0.910000 0.500552
0.020000 12.942473 2.039290 -0.283197 0.016091 -3.956837 0.012329 3.430000 0.494293
0.030000 12.518440 1.960681 -0.298372 0.031082 -3.493679 -0.013055 3.960000 0.404167
0.040000 9.664712 1.931359 -0.325217 0.040121 -2.338914 -0.044850 2.370000 0.289277
0.050000 8.104683 1.964937 -0.354654 0.038966 -1.740614 -0.057019 1.330000 0.197091
0.075000 6.599562 2.186655 -0.397113 0.010796 -1.269169 -0.055330 0.440000 0.098251
0.100000 5.885144 2.413337 -0.401910 -0.018813 -1.148164 -0.046934 0.180000 0.077783
0.150000 4.973426 2.734479 -0.373557 -0.056450 -1.090615 -0.034214 0.000000 0.072902
0.200000 4.392482 2.927525 -0.334102 -0.073516 -1.117532 -0.025239 0.000000 0.083125
0.300000 3.555967 3.118749 -0.262818 -0.078628 -1.183826 -0.014927 0.020000 0.115761
0.400000 3.050238 3.189181 -0.210182 -0.069147 -1.282536 -0.008164 0.150000 0.144658
0.500000 2.686220 3.210507 -0.173196 -0.056596 -1.368027 -0.003961 0.280000 0.165988
""")
class DouglasEtAl2013StochasticSD100Q200K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 200 - Kappa 0.02
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 1.192307 2.774275 -0.247878 -0.020415 -1.586657 -0.018039 0.650000 0.237832
PGA 6.143483 2.393645 -0.318235 -0.007270 -1.865758 -0.025405 0.940000 0.303305
0.005000 6.143483 2.393645 -0.318235 -0.007270 -1.865758 -0.025405 0.940000 0.303305
0.010000 6.223932 2.385919 -0.314315 -0.007310 -1.893840 -0.024825 1.010000 0.309814
0.020000 7.395436 2.312158 -0.297741 -0.003325 -2.311054 -0.015025 1.480000 0.387875
0.030000 8.809595 2.203084 -0.297912 0.010720 -2.660964 -0.014302 2.450000 0.394692
0.040000 8.239240 2.123928 -0.313728 0.023209 -2.270418 -0.031732 2.150000 0.329825
0.050000 7.290576 2.099340 -0.337922 0.028303 -1.817399 -0.045198 1.470000 0.245893
0.075000 6.039440 2.233795 -0.386761 0.009680 -1.307969 -0.051047 0.580000 0.115030
0.100000 5.424826 2.438553 -0.398675 -0.018711 -1.155387 -0.045509 0.290000 0.075484
0.150000 4.583166 2.755714 -0.376066 -0.056662 -1.058678 -0.034872 0.070000 0.051392
0.200000 3.970968 2.954375 -0.339018 -0.074544 -1.041010 -0.027479 0.000000 0.042082
0.300000 3.140590 3.159942 -0.269243 -0.081692 -1.084405 -0.017543 0.030000 0.054047
0.400000 2.612667 3.242156 -0.215940 -0.073873 -1.167260 -0.010844 0.160000 0.077176
0.500000 2.245892 3.271182 -0.177449 -0.062250 -1.250837 -0.006324 0.310000 0.097320
""")
class DouglasEtAl2013StochasticSD100Q200K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 200 - Kappa 0.04
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.324034 2.920347 -0.238177 -0.030803 -1.439854 -0.017624 0.550000 0.172292
PGA 4.650034 2.597451 -0.315493 -0.022452 -1.589438 -0.025503 0.680000 0.223062
0.005000 4.650034 2.597451 -0.315493 -0.022452 -1.589438 -0.025503 0.680000 0.223062
0.010000 4.671582 2.596766 -0.314468 -0.022152 -1.594957 -0.025432 0.790000 0.217563
0.020000 4.798084 2.575099 -0.309272 -0.020982 -1.635139 -0.024755 0.830000 0.235535
0.030000 5.557493 2.500164 -0.299179 -0.014196 -1.877718 -0.020408 1.220000 0.291112
0.040000 6.148756 2.397408 -0.300585 -0.001277 -1.974914 -0.023559 1.570000 0.307954
0.050000 5.995187 2.317777 -0.314726 0.009409 -1.785848 -0.033387 1.370000 0.273661
0.075000 5.272165 2.319055 -0.364402 0.006274 -1.349497 -0.044766 0.670000 0.146677
0.100000 4.816352 2.474277 -0.386946 -0.018584 -1.171118 -0.042984 0.350000 0.085370
0.150000 4.127549 2.772932 -0.372761 -0.056995 -1.047844 -0.034619 0.090000 0.049354
0.200000 3.582874 2.970861 -0.337984 -0.075490 -1.016088 -0.027886 0.000000 0.034151
0.300000 2.773830 3.183724 -0.270486 -0.083629 -1.028815 -0.018856 0.000000 0.029088
0.400000 2.194582 3.276930 -0.217971 -0.077148 -1.075743 -0.013005 0.080000 0.041300
0.500000 1.802847 3.316343 -0.179194 -0.066798 -1.144702 -0.008539 0.230000 0.054250
""")
class DouglasEtAl2013StochasticSD100Q200K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 200 - Kappa 0.06
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.243760 3.020036 -0.225340 -0.037543 -1.371197 -0.016464 0.530000 0.134978
PGA 3.731088 2.742746 -0.304881 -0.033736 -1.469668 -0.023754 0.570000 0.177376
0.005000 3.731088 2.742746 -0.304881 -0.033736 -1.469668 -0.023754 0.570000 0.177376
0.010000 3.716108 2.744634 -0.304259 -0.033612 -1.463141 -0.023946 0.660000 0.170046
0.020000 3.736639 2.736668 -0.303250 -0.032646 -1.464693 -0.024120 0.720000 0.169964
0.030000 3.891647 2.705779 -0.298266 -0.030194 -1.511139 -0.023452 0.750000 0.193032
0.040000 4.376431 2.633772 -0.293080 -0.022386 -1.643959 -0.022241 0.990000 0.233269
0.050000 4.693648 2.543442 -0.296599 -0.011107 -1.664449 -0.025929 1.120000 0.247545
0.075000 4.494445 2.434735 -0.337168 -0.000239 -1.370047 -0.038550 0.700000 0.172198
0.100000 4.218543 2.520488 -0.369418 -0.018234 -1.188714 -0.039946 0.390000 0.100034
0.150000 3.687990 2.788017 -0.366988 -0.056695 -1.042816 -0.034124 0.090000 0.050771
0.200000 3.246029 2.981239 -0.334865 -0.075871 -1.012369 -0.027583 0.000000 0.035588
0.300000 2.547385 3.193296 -0.268101 -0.084616 -1.034418 -0.018055 0.000000 0.035487
0.400000 1.929091 3.291331 -0.216130 -0.078781 -1.047607 -0.013193 0.000000 0.038458
0.500000 1.420032 3.338090 -0.177763 -0.069273 -1.058550 -0.010544 0.020000 0.041357
""")
class DouglasEtAl2013StochasticSD100Q600K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 600 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 1.930467 2.282914 -0.232531 0.009811 -1.506516 -0.014738 0.480000 0.250677
PGA 7.550856 1.757832 -0.264748 0.027942 -1.791055 -0.020854 0.770000 0.291210
0.005000 7.550856 1.757832 -0.264748 0.027942 -1.791055 -0.020854 0.770000 0.291210
0.010000 9.846235 1.656046 -0.221924 0.023280 -2.534813 -0.007769 1.850000 0.370776
0.020000 8.661442 1.479972 -0.239039 0.055831 -1.765309 -0.038570 1.160000 0.204246
0.030000 7.786541 1.519310 -0.305741 0.070126 -1.388340 -0.041419 0.580000 0.101092
0.040000 7.378668 1.655445 -0.352406 0.063164 -1.275818 -0.036481 0.420000 0.070106
0.050000 7.067322 1.804172 -0.380635 0.049506 -1.223677 -0.031860 0.350000 0.063643
0.075000 6.404616 2.131057 -0.407507 0.012208 -1.152531 -0.024536 0.230000 0.066290
0.100000 5.841743 2.379046 -0.404887 -0.018207 -1.109157 -0.020542 0.120000 0.068159
0.150000 5.002262 2.704322 -0.370890 -0.055531 -1.085122 -0.015352 0.010000 0.072647
0.200000 4.415789 2.891066 -0.328853 -0.071690 -1.105752 -0.011463 0.000000 0.085337
0.300000 3.603809 3.064510 -0.255542 -0.074405 -1.170795 -0.006312 0.050000 0.116966
0.400000 3.064236 3.118271 -0.203224 -0.062765 -1.244979 -0.002904 0.150000 0.141752
0.500000 2.675408 3.127573 -0.167713 -0.048927 -1.312643 -0.000629 0.260000 0.158514
""")
class DouglasEtAl2013StochasticSD100Q600K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 600 - Kappa 0.02
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.859027 2.560333 -0.256716 -0.004586 -1.347298 -0.013226 0.350000 0.158683
PGA 5.483004 2.098021 -0.314032 0.014585 -1.434920 -0.019686 0.430000 0.188718
0.005000 5.483004 2.098021 -0.314032 0.014585 -1.434920 -0.019686 0.430000 0.188718
0.010000 5.590563 2.078050 -0.304781 0.013859 -1.462124 -0.019582 0.530000 0.199207
0.020000 6.323929 1.907170 -0.264365 0.022448 -1.598684 -0.022629 0.830000 0.254143
0.030000 6.360553 1.743506 -0.290349 0.051202 -1.428295 -0.030254 0.640000 0.170097
0.040000 6.280100 1.761941 -0.338737 0.056355 -1.307509 -0.030999 0.490000 0.102912
0.050000 6.156226 1.864363 -0.372860 0.046698 -1.237172 -0.029178 0.400000 0.074913
0.075000 5.764401 2.160858 -0.407244 0.011582 -1.149344 -0.023889 0.280000 0.063359
0.100000 5.332119 2.404752 -0.408221 -0.018371 -1.096195 -0.020483 0.180000 0.059761
0.150000 4.572257 2.736641 -0.378958 -0.055996 -1.043519 -0.016192 0.050000 0.047032
0.200000 3.979601 2.935795 -0.339802 -0.073476 -1.034428 -0.013040 0.000000 0.039410
0.300000 3.168438 3.133724 -0.268065 -0.079516 -1.080795 -0.007915 0.050000 0.053262
0.400000 2.638659 3.204443 -0.214189 -0.070215 -1.155198 -0.004100 0.180000 0.075824
0.500000 2.259140 3.222666 -0.176200 -0.057265 -1.226319 -0.001508 0.320000 0.093694
""")
class DouglasEtAl2013StochasticSD100Q600K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 600 - Kappa 0.04
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.119378 2.780484 -0.253594 -0.020673 -1.291456 -0.011053 0.340000 0.113228
PGA 4.259541 2.392809 -0.326052 -0.006053 -1.333966 -0.016023 0.330000 0.136785
0.005000 4.259541 2.392809 -0.326052 -0.006053 -1.333966 -0.016023 0.330000 0.136785
0.010000 4.251123 2.387990 -0.324301 -0.005490 -1.326403 -0.016327 0.410000 0.130354
0.020000 4.388952 2.343842 -0.311970 -0.004082 -1.352805 -0.016678 0.480000 0.150035
0.030000 4.707088 2.187035 -0.290198 0.010393 -1.368542 -0.020254 0.540000 0.177981
0.040000 4.887089 2.040138 -0.308165 0.032418 -1.305366 -0.023816 0.480000 0.144024
0.050000 4.988855 2.016285 -0.343966 0.037208 -1.248685 -0.024623 0.420000 0.103860
0.075000 4.943346 2.207905 -0.395758 0.010369 -1.152846 -0.022581 0.300000 0.066877
0.100000 4.703471 2.431518 -0.403352 -0.019166 -1.096833 -0.019933 0.210000 0.059521
0.150000 4.108059 2.757050 -0.378089 -0.057122 -1.029874 -0.016352 0.060000 0.044629
0.200000 3.587053 2.959047 -0.340715 -0.075120 -1.010851 -0.013446 0.000000 0.031430
0.300000 2.778855 3.170929 -0.271699 -0.082663 -1.023315 -0.009086 0.000000 0.026837
0.400000 2.216010 3.258911 -0.218633 -0.075493 -1.073414 -0.005610 0.100000 0.039524
0.500000 1.814270 3.291360 -0.179852 -0.064276 -1.133084 -0.003021 0.240000 0.052149
""")
class DouglasEtAl2013StochasticSD100Q600K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 600 - Kappa 0.06
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.385966 2.921215 -0.241518 -0.031192 -1.266778 -0.009682 0.370000 0.089260
PGA 3.476405 2.592604 -0.319988 -0.021982 -1.298680 -0.013552 0.320000 0.110342
0.005000 3.476405 2.592604 -0.319988 -0.021982 -1.298680 -0.013552 0.320000 0.110342
0.010000 3.452832 2.592519 -0.319177 -0.021680 -1.288799 -0.013792 0.400000 0.102343
0.020000 3.444480 2.577292 -0.317317 -0.020104 -1.275470 -0.014372 0.430000 0.100487
0.030000 3.559513 2.517009 -0.305692 -0.015947 -1.286041 -0.015339 0.440000 0.121158
0.040000 3.765142 2.381394 -0.296502 -0.000644 -1.278677 -0.018072 0.440000 0.135740
0.050000 3.934182 2.261618 -0.311935 0.015917 -1.240187 -0.020347 0.400000 0.118929
0.075000 4.152084 2.280749 -0.373161 0.008879 -1.157861 -0.020808 0.310000 0.073522
0.100000 4.087559 2.461745 -0.392945 -0.018970 -1.098281 -0.019254 0.220000 0.060250
0.150000 3.670521 2.771939 -0.374531 -0.057610 -1.024198 -0.016317 0.060000 0.044506
0.200000 3.250587 2.971728 -0.338438 -0.076056 -1.007970 -0.013277 0.000000 0.033263
0.300000 2.549104 3.185494 -0.269864 -0.084272 -1.029927 -0.008270 0.000000 0.033185
0.400000 1.929702 3.281582 -0.217537 -0.078047 -1.040710 -0.005898 0.000000 0.035687
0.500000 1.436761 3.324546 -0.179123 -0.068022 -1.055423 -0.004614 0.040000 0.038816
""")
class DouglasEtAl2013StochasticSD100Q1800K005(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 1800 - Kappa 0.005
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 1.659760 2.085934 -0.203331 0.016642 -1.342952 -0.010418 0.280000 0.159418
PGA 6.996713 1.528199 -0.215985 0.031603 -1.447689 -0.016366 0.390000 0.171628
0.005000 6.996713 1.528199 -0.215985 0.031603 -1.447689 -0.016366 0.390000 0.171628
0.010000 7.996946 1.329101 -0.149067 0.030138 -1.539869 -0.024932 0.650000 0.162791
0.020000 7.745417 1.303194 -0.239201 0.070601 -1.302264 -0.024200 0.360000 0.070283
0.030000 7.533536 1.452888 -0.310254 0.074700 -1.253909 -0.018972 0.340000 0.051370
0.040000 7.283422 1.621081 -0.354306 0.064965 -1.223874 -0.015936 0.330000 0.049267
0.050000 7.014133 1.780964 -0.380886 0.050399 -1.194817 -0.014241 0.300000 0.053286
0.075000 6.382674 2.115311 -0.405410 0.012315 -1.139575 -0.012050 0.210000 0.062320
0.100000 5.829893 2.363514 -0.401048 -0.018405 -1.100022 -0.010993 0.110000 0.064916
0.150000 4.998001 2.683528 -0.364277 -0.055598 -1.077174 -0.008931 0.010000 0.067943
0.200000 4.409395 2.862732 -0.320536 -0.071028 -1.094833 -0.006715 0.000000 0.078367
0.300000 3.577077 3.022605 -0.246269 -0.071985 -1.147776 -0.003428 0.030000 0.105969
0.400000 3.035435 3.067520 -0.194531 -0.059294 -1.218545 -0.000714 0.130000 0.127315
0.500000 2.627273 3.071980 -0.159897 -0.045152 -1.276219 0.000940 0.220000 0.141400
""")
class DouglasEtAl2013StochasticSD100Q1800K020(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 1800 - Kappa 0.02
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.756564 2.445973 -0.253403 0.003281 -1.282695 -0.007661 0.260000 0.104487
PGA 5.272572 1.949753 -0.301116 0.023568 -1.313398 -0.011023 0.260000 0.114391
0.005000 5.272572 1.949753 -0.301116 0.023568 -1.313398 -0.011023 0.260000 0.114391
0.010000 5.345043 1.917835 -0.283639 0.020702 -1.318254 -0.011704 0.340000 0.120689
0.020000 5.771054 1.657865 -0.239029 0.040756 -1.307160 -0.015882 0.370000 0.119677
0.030000 6.063955 1.584963 -0.300301 0.065591 -1.258102 -0.016140 0.350000 0.070969
0.040000 6.139691 1.688081 -0.349963 0.061473 -1.223802 -0.014780 0.340000 0.054120
0.050000 6.092356 1.825691 -0.380057 0.048709 -1.197646 -0.013483 0.330000 0.053389
0.075000 5.746209 2.145732 -0.409657 0.012215 -1.137992 -0.011748 0.260000 0.059090
0.100000 5.323348 2.394423 -0.409179 -0.017886 -1.090278 -0.010964 0.170000 0.057788
0.150000 4.572524 2.727532 -0.378733 -0.055465 -1.041507 -0.009563 0.050000 0.045419
0.200000 3.980240 2.924996 -0.338776 -0.072780 -1.031869 -0.008004 0.000000 0.037113
0.300000 3.168217 3.116511 -0.265955 -0.078095 -1.075671 -0.004554 0.050000 0.049664
0.400000 2.628906 3.180334 -0.211884 -0.067926 -1.143790 -0.001683 0.170000 0.070456
0.500000 2.248402 3.193443 -0.174358 -0.054350 -1.212779 0.000481 0.310000 0.086297
""")
class DouglasEtAl2013StochasticSD100Q1800K040(
DouglasEtAl2013StochasticSD001Q200K005):
"""
Stress Drop 100 - Q 1800 - Kappa 0.04
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV 0.081975 2.712222 -0.257520 -0.015426 -1.262417 -0.006158 0.300000 0.080425
PGA 4.181635 2.297079 -0.326131 0.001468 -1.283519 -0.008255 0.260000 0.090545
0.005000 4.181635 2.297079 -0.326131 0.001468 -1.283519 -0.008255 0.260000 0.090545
0.010000 4.154262 2.289368 -0.323698 0.002146 -1.268003 -0.008716 0.320000 0.082240
0.020000 4.227258 2.223034 -0.302232 0.003032 -1.261366 -0.009833 0.340000 0.091578
0.030000 4.484681 2.013020 -0.282213 0.025868 -1.246485 -0.011863 0.340000 0.090374
0.040000 4.756849 1.902764 -0.318835 0.045593 -1.222495 -0.012411 0.340000 0.068900
0.050000 4.916688 1.936909 -0.358234 0.042875 -1.196738 -0.012258 0.330000 0.058287
0.075000 4.929939 2.185970 -0.402288 0.010777 -1.139932 -0.011315 0.280000 0.058738
0.100000 4.696966 2.421114 -0.406325 -0.019033 -1.091044 -0.010749 0.200000 0.057348
0.150000 4.109324 2.751228 -0.379171 -0.056885 -1.028808 -0.009828 0.060000 0.043924
0.200000 3.588184 2.953742 -0.341266 -0.074778 -1.009789 -0.008448 0.000000 0.030578
0.300000 2.779909 3.163968 -0.271766 -0.082047 -1.021491 -0.005690 0.000000 0.025361
0.400000 2.216651 3.248583 -0.218440 -0.074468 -1.069983 -0.003062 0.100000 0.037555
0.500000 1.814014 3.277280 -0.179677 -0.062794 -1.127646 -0.000993 0.240000 0.049294
""")
class DouglasEtAl2013StochasticSD100Q1800K060(
DouglasEtAl2013StochasticSD001Q200K005):
"""
    Stress Drop 100 - Q 1800 - Kappa 0.06
"""
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT b1 b2 b3 b4 b5 b6 bh total
PGV -0.408664 2.875644 -0.246923 -0.027865 -1.247456 -0.005569 0.340000 0.066551
PGA 3.437980 2.525510 -0.323993 -0.016500 -1.270032 -0.006956 0.280000 0.078198
0.005000 3.437980 2.525510 -0.323993 -0.016500 -1.270032 -0.006956 0.280000 0.078198
0.010000 3.396878 2.524213 -0.323032 -0.016096 -1.253095 -0.007381 0.340000 0.069395
0.020000 3.386952 2.504298 -0.320406 -0.014186 -1.238463 -0.007826 0.370000 0.065232
0.030000 3.452606 2.418102 -0.302310 -0.008615 -1.224039 -0.009038 0.340000 0.073601
0.040000 3.643587 2.252296 -0.296655 0.011741 -1.207910 -0.010340 0.320000 0.072091
0.050000 3.858735 2.151563 -0.322469 0.026514 -1.188204 -0.010905 0.310000 0.062145
0.075000 4.128572 2.245399 -0.384097 0.009787 -1.136556 -0.010915 0.270000 0.057688
0.100000 4.083878 2.448406 -0.398178 -0.019212 -1.091818 -0.010515 0.210000 0.056591
0.150000 3.664775 2.766866 -0.376248 -0.057638 -1.019992 -0.010036 0.050000 0.043918
0.200000 3.252018 2.968006 -0.339355 -0.075944 -1.007291 -0.008344 0.000000 0.032812
0.300000 2.549538 3.181794 -0.270357 -0.084010 -1.028688 -0.004909 0.000000 0.032229
0.400000 1.929709 3.276521 -0.217924 -0.077582 -1.038443 -0.003369 0.000000 0.034334
0.500000 1.436627 3.317359 -0.179491 -0.067294 -1.052000 -0.002598 0.040000 0.037175
""")
|
StarcoderdataPython
|
261985
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
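# Each monitored PFHT threshold below follows the same two-step pattern: a SUSY_HLT_InclusiveHT
# analyzer fills the turn-on numerator/denominator histograms for its trigger path, and the
# matching DQMGenericClient harvester ("...POSTPROCESSING") divides them into the pfMET and
# pfHT efficiency curves inside the corresponding HLT/SUSYBSM subdirectory.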
SUSY_HLT_InclusiveHT_800 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT800_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT800Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHTo800oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT800_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_900 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT900_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT900Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHTo900oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT900_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux125 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT125_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT125Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux125oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT125_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux200 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT200_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT200Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux200oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT200_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux250 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT250_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT250Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux250oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT250_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux300 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT300_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT300Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux300oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT300_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux350 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT350_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT350Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux350oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT350_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux400 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT400_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT400Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux400oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT400_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux475 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT475_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT475Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux475oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT475_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT_aux600 = cms.EDAnalyzer("SUSY_HLT_InclusiveHT",
trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
pfMETCollection = cms.InputTag("pfMet"),
pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
caloJetCollection = cms.InputTag("ak4CaloJets"),
TriggerResults = cms.InputTag('TriggerResults','','HLT'),
TriggerPath = cms.string('HLT_PFHT600_v'),
TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
TriggerFilter = cms.InputTag('hltPFHT600Jet30', '', 'HLT'), #the last filter in the path
PtThrJet = cms.untracked.double(30.0),
EtaThrJet = cms.untracked.double(3.0)
)
SUSYoHLToInclusiveHToAux600oPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT600_v"),
efficiency = cms.vstring(
"pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
"pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
),
resolution = cms.vstring("")
)
SUSY_HLT_InclusiveHT = cms.Sequence(SUSY_HLT_InclusiveHT_aux125 +
SUSY_HLT_InclusiveHT_aux200 +
SUSY_HLT_InclusiveHT_aux250 +
SUSY_HLT_InclusiveHT_aux300 +
SUSY_HLT_InclusiveHT_aux350 +
SUSY_HLT_InclusiveHT_aux400 +
SUSY_HLT_InclusiveHT_aux475 +
SUSY_HLT_InclusiveHT_aux600 +
SUSY_HLT_InclusiveHT_800 +
SUSY_HLT_InclusiveHT_900
)
SUSY_HLT_InclusiveHT_POSTPROCESSING = cms.Sequence(SUSYoHLToInclusiveHToAux125oPOSTPROCESSING +
SUSYoHLToInclusiveHToAux200oPOSTPROCESSING +
SUSYoHLToInclusiveHToAux250oPOSTPROCESSING +
SUSYoHLToInclusiveHToAux300oPOSTPROCESSING +
SUSYoHLToInclusiveHToAux350oPOSTPROCESSING +
SUSYoHLToInclusiveHToAux400oPOSTPROCESSING +
SUSYoHLToInclusiveHToAux475oPOSTPROCESSING +
SUSYoHLToInclusiveHToAux600oPOSTPROCESSING +
SUSYoHLToInclusiveHTo800oPOSTPROCESSING +
SUSYoHLToInclusiveHTo900oPOSTPROCESSING
)
|
StarcoderdataPython
|
1795653
|
import os
import argparse
import random
from tqdm import tqdm
import logging
from typing import Dict
logger = logging.getLogger(__name__)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Sample Sentences from monolingual corpora to train tokenizer")
parser.add_argument(
"--datasets_path",
type=str,
required=True,
help="Path containing monolingual corpora for different languages",
)
parser.add_argument("--output_path", type=str, required=True, help="path to store sampled sentences")
parser.add_argument("--alpha", type=float, default=0.3, help="multinomial alpha")
parser.add_argument("--seed", type=int, default=10, help="random seed")
return parser
def calc_num_samples_sentences(
lang_num_lines: Dict[str, int], alpha: float
) -> Dict[str, int]:
lang_prob = {}
total_sentences = sum(lang_num_lines.values())
for key, value in lang_num_lines.items():
lang_prob[key] = value / total_sentences
total_distr = 0
for k, v in lang_prob.items():
total_distr += v**alpha
new_prob = {k: v**alpha / total_distr for k, v in lang_prob.items()}
sampled_sentences = {}
for language, num_lines in lang_num_lines.items():
for lang_code, sampled_prob in new_prob.items():
if language == lang_code:
num_sentences = sampled_prob * num_lines
sampled_sentences[language] = round(num_sentences)
return sampled_sentences
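# Rough illustration of the smoothing above (made-up counts, not from any real corpus):
# with lang_num_lines = {"en": 900, "sw": 100} and alpha = 0.3 the raw shares 0.9 / 0.1 become
# 0.9**0.3 / (0.9**0.3 + 0.1**0.3) ~= 0.659 and ~= 0.341, so round(0.659 * 900) = 593 "en" and
# round(0.341 * 100) = 34 "sw" sentences are kept, i.e. the low-resource language is sampled
# relatively more heavily than under plain proportional (alpha = 1) sampling.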
def main():
parser = get_parser()
args = parser.parse_args()
random.seed(args.seed)
logger.info("***** Sampling Sentences for Tokenizer Training *****")
files = [
os.path.join(args.datasets_path, file)
for file in os.listdir(args.datasets_path)
]
logger.info(f"Number of training files found: {len(files)}")
lang_corpus = {}
lang_num_lines = {}
for file in files:
lang_code = file.split(".")[-1]
with open(file) as f:
txt = f.readlines()
lang_corpus[lang_code] = txt
lang_num_lines[lang_code] = len(txt)
sampled_sentences = calc_num_samples_sentences(lang_num_lines, args.alpha)
for lang in tqdm(sampled_sentences.keys()):
logger.info(f"Number of sampled sentences for {lang} = {sampled_sentences[lang]}")
sentences = random.sample(lang_corpus[lang], sampled_sentences[lang])
file = os.path.join(args.output_path, "sampled." + lang)
with open(file, "w") as out_file:
out_file.writelines(sentences)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
344774
|
# -----------------------------------------------------
# Initial Settings for the Project
#
# Author: <NAME>
# Creating Date: May 29, 2018
# Latest rectifying: Jun 5, 2018
# -----------------------------------------------------
import sys
import time
import functools
# import matplotlib
def clock_non_return(func):
@functools.wraps(func)
def clocked(*args, **kwargs):
t0 = time.time()
func(*args, **kwargs)
elapsed = time.time() - t0
if elapsed < 60:
trans_elap = elapsed
unit = 'seconds'
elif elapsed < 3600:
trans_elap = elapsed / 60
unit = 'minutes'
else:
trans_elap = elapsed / 3600
unit = 'hours'
print('\n' + '*' * 40)
print('Entire process costs {:.2f} {:s}.'.format(trans_elap, unit))
return clocked
def add_path(path):
    if path not in sys.path:
sys.path.append(path)
# matplotlib.use('Qt5Agg')
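# Illustrative use of the decorator above (function name and body are made up):
#
# @clock_non_return
# def run_experiment():
#     ...  # long-running work
#
# run_experiment()  # prints e.g. "Entire process costs 2.50 minutes." and returns None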
|
StarcoderdataPython
|
4938374
|
<filename>scrips/search_eval/run_selfcenter_eval_search.py<gh_stars>0
import os
import sys
import prody as pr
import numpy as np
# Alternatively, add the python package path manually:
#sys.path.append(r'/mnt/e/GitHub_Design/Metalprot')
from metalprot.search import search, search_eval
from metalprot.basic import filter
import pickle
'''
python /mnt/e/GitHub_Design/Metalprot/scrips/search_eval/run_selfcenter_eval_search.py
'''
query_dir = '/mnt/e/DesignData/ligands/ZN_rcsb_datesplit/20211013/20211013_selfcenter/pickle_noCYS/'
with open(query_dir + 'all_metal_vdm.pkl', 'rb') as f:
query_all_metal = pickle.load(f)
with open(query_dir + 'AAMetalPhiPsi.pkl', 'rb') as f:
all_querys = pickle.load(f)
with open(query_dir + 'cluster_centroid_dict.pkl', 'rb') as f:
cluster_centroid_dict = pickle.load(f)
print(len(all_querys))
### run Search_struct
# workdir = '/mnt/e/DesignData/ligands/LigandBB/MID1sc10/'
# outdir = workdir + 'output_eval_selfcenter_/'
# target_path = workdir + '5od1_zn.pdb'
# win_filter = [34, 60, 64]
# workdir = '/mnt/e/DesignData/ligands/LigandBB/6dwv/'
# outdir = workdir + 'output_selfcenter_eval/'
# target_path = workdir + '6dwv_core.pdb'
# win_filter = [4, 6, 15]
workdir = '/mnt/e/DesignData/ligands/LigandBB/6zw1/'
outdir = workdir + 'output_eval_selfcenter_/'
target_path = workdir + '6zw1_ZN_1.pdb'
win_filter = []
metal_metal_dist = 0.45
num_contact_vdms = [3]
allowed_aa_combinations = []
_filter = filter.Search_filter(filter_abple = False, filter_phipsi = True, max_phipsi_val = 25,
filter_vdm_score = False, min_vdm_score = 0, filter_vdm_count = False, min_vdm_clu_num = 20,
after_search_filter_geometry = True, filter_based_geometry_structure = True, angle_tol = 12, aa_aa_tol = 0.3, aa_metal_tol = 0.2,
pair_angle_range = [85, 130], pair_aa_aa_dist_range = [2.8, 4], pair_metal_aa_dist_range = None,
after_search_filter_qt_clash = True, vdm_vdm_clash_dist = 2.7, vdm_bb_clash_dist = 2.2,
write_filtered_result = False, selfcenter_filter_member_phipsi=True)
ss = search_eval.Search_eval(target_path, outdir, all_querys, cluster_centroid_dict, query_all_metal,
num_contact_vdms, metal_metal_dist, win_filter, validateOriginStruct = True, search_filter= _filter, geometry_path = None,
density_radius = 0.65, allowed_aa_combinations = allowed_aa_combinations, eval_mmdist=False, eval_density=False)
ss.run_eval_selfcenter_search()
|
StarcoderdataPython
|
3330914
|
#!/usr/bin/env python3
"""
Model representation of a docheading type Element from doxygen
<xsd:complexType name="docHeadingType" mixed="true">
<xsd:group ref="docTitleCmdGroup" minOccurs="0" maxOccurs="unbounded" />
<xsd:attribute name="level" type="xsd:integer" /> <!-- todo: range 1-6 -->
</xsd:complexType>
"""
from ..groups.doctitlecmdgroup import DocTitleCmdGroup
class DocCopy(DocTitleCmdGroup):
def __init__(self, node):
        super().__init__(node)
def get_level(self):
return self.get('level')
|
StarcoderdataPython
|
8078115
|
<filename>speed_challenge.py
import cv2
import os
import sys
import numpy as np
from sklearn import linear_model
import queue
from tools import movingAverage, plot, computeAverage
import matplotlib.pyplot as plt
class Speed_Car():
def __init__(self, video_train_path, text_train_path, video_test_path):
# Train video and text path
self.v_train = cv2.VideoCapture(video_train_path)
self.t_train = text_train_path
        # Number of frames for 17 min of 20 fps video
self.n_frames = 17*60*20
# Read test video
self.test_vid = cv2.VideoCapture(video_test_path)
# Generate test.txt
self.predict = True
# Generate visualization
self.visual = False
        # Separate function so that different estimation methods can be incorporated into the same class
self.parameters()
# test text directory
self.t_text = True
# See the camera in the test
self.camera = False
def parameters(self):
""" Extract parameters for the Lucas-Kanade method """
# Using Lucas-Kanade method to estimate the optical flow
self.lkparameter = dict(winSize=(21, 21),
maxLevel=2,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.01))
self.frame_idx = 0
self.prev_pts = None
self.detect_interval = 1
self.temp_preds = np.zeros(int(self.v_train.get(cv2.CAP_PROP_FRAME_COUNT)))
""" load traning text file """
with open(self.t_train, 'r') as file_:
gt = file_.readlines()
gt = [float(x.strip()) for x in gt]
self.gt = np.array(gt[:self.n_frames])
self.window = 80 # for moving average
self.prev_gray = None
def focus(self, mask=None, test=False):
""" Focus on the road """
vid = self.test_vid if test else self.v_train
if mask is None:
W = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
H = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
mask = np.zeros(shape=(H, W), dtype=np.uint8)
mask.fill(255)
else:
W = mask.shape[1]
H = mask.shape[0]
cv2.rectangle(mask, (0, 0), (W, H), (0, 0, 0), -1)
x_top_offset = 240
x_btm_offset = 65
poly_pts = np.array([[[640-x_top_offset, 250], [x_top_offset, 250],
[x_btm_offset, 350], [640-x_btm_offset, 350]]], dtype=np.int32)
cv2.fillPoly(mask, poly_pts, (255, 255, 255))
return mask
def opticalflow(self, frame):
""" calculating optical flow """
        # blur the surroundings
frame = cv2.GaussianBlur(frame, (3, 3), 0)
        # Track the previous feature points into the current frame with Lucas-Kanade
curr_pts, _st, _err = cv2.calcOpticalFlowPyrLK(
self.prev_gray, frame, self.prev_pts, None, **self.lkparameter)
# Store Flow(x, y, dx, dy)
flow = np.hstack((self.prev_pts.reshape(-1, 2),
(curr_pts - self.prev_pts).reshape(-1, 2)))
preds = []
for x, y, u, v in flow:
if v < -0.05:
continue
# Translate points to center
x -= frame.shape[1]/2
y -= frame.shape[0]/2
# Append Preds taking care of stability issues
if y == 0 or (abs(u) - abs(v)) > 11:
preds.append(0)
preds.append(0)
elif x == 0:
preds.append(0)
preds.append(v/y**2)
else:
preds.append(u/y**2)
preds.append(v/y**2)
return [n for n in preds if n >= 0]
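    # Why u/y**2 and v/y**2: under the assumed pinhole/flat-road model a ground point imaged
    # at vertical offset y from the image centre satisfies y = f*h/Z, so its flow for forward
    # speed V is v = dy/dt = V*y**2/(f*h). The ratio v/y**2 is therefore V/(h*f), and the
    # per-frame median of these ratios only needs the single scale factor "hf" (fitted later
    # in run() with a zero-intercept linear regression against the ground truth) to become a speed.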
def KeyPts(self, offset_x=0, offset_y=0):
""" return key points from """
if self.prev_pts is None:
return None
return [cv2.KeyPoint(x=p[0][0] + offset_x, y=p[0][1] + offset_y, _size=10) for p in self.prev_pts]
def features(self, frame, mask):
return cv2.goodFeaturesToTrack(frame, 30, 0.1, 10, blockSize=10, mask=mask)
def run(self):
# Construct mask first
mask = self.focus()
prev_key_pts = None
while self.v_train.isOpened() and self.frame_idx < len(self.gt):
ret, frame = self.v_train.read()
if not ret:
break
# Convert to B/W
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = frame_gray[130:350, 35:605]
            mask_vis = frame.copy()  # visualization
# Process each frame
if self.prev_pts is None:
self.temp_preds[self.frame_idx] = 0
else:
                # Get median of V/hf values
preds = self.opticalflow(frame_gray)
self.temp_preds[self.frame_idx] = np.median(
preds) if len(preds) else 0
# Extract features
self.prev_pts = self.features(frame_gray, mask[130:350, 35:605])
self.prev_gray = frame_gray
self.frame_idx += 1
# for Visualization purpose only
if self.visual:
prev_key_pts = self.visualize(frame, mask_vis, prev_key_pts)
            if cv2.waitKey(1) & 0xFF == ord('q'):
break
# self.video.release()
self.v_train.release()
# split train mp4 to train and validation
split = self.frame_idx//20
train_preds = self.temp_preds[:self.frame_idx-split]
val_preds = self.temp_preds[self.frame_idx - split:self.frame_idx]
gt_train = self.gt[:len(train_preds)]
gt_val = self.gt[len(train_preds):self.frame_idx]
# fit to ground truth (moving average)
preds = movingAverage(train_preds, self.window)
lin_reg = linear_model.LinearRegression(fit_intercept=False)
lin_reg.fit(preds.reshape(-1, 1), gt_train)
hf_factor = lin_reg.coef_[0]
print("Estimated hf factor = {}".format(hf_factor))
# estimate training error
pred_speed_train = train_preds * hf_factor
pred_speed_train = movingAverage(pred_speed_train, self.window)
self.mse_train = np.mean((pred_speed_train - gt_train)**2)
print("Mean Squared Error for train dataset", self.mse_train)
# estimate validation error
pred_speed_val = val_preds * hf_factor
pred_speed_val = movingAverage(pred_speed_val, self.window)
self.mse_test = np.mean((pred_speed_val - gt_val)**2)
print("Mean Squared Error for validation dataset", self.mse_test)
return hf_factor
def visualize(self, frame, mask_vis, prev_key_pts, speed=None):
self.focus(mask_vis)
mask_vis = cv2.bitwise_not(mask_vis)
frame_vis = cv2.addWeighted(frame, 1, mask_vis, 0.3, 0)
key_pts = self.KeyPts(35, 130)
cv2.drawKeypoints(frame_vis, key_pts, frame_vis, color=(0, 0, 255))
cv2.drawKeypoints(frame_vis, prev_key_pts,
frame_vis, color=(0, 255, 0))
if speed:
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame_vis, "speed {}".format(speed),
(10, 35), font, 1.2, (0, 0, 255))
cv2.imshow('test', frame_vis)
return key_pts
def test(self, hf_factor, save_txt=True):
mask = self.focus(test=True)
self.prev_gray = None
test_preds = np.zeros(int(self.test_vid.get(cv2.CAP_PROP_FRAME_COUNT)))
frame_idx = 0
frame_index =[]
curr_estimate = 0
prev_key_pts = None
self.prev_pts = None
while self.test_vid.isOpened():
ret, frame = self.test_vid.read()
if not ret:
break
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = frame_gray[130:350, 35:605]
# process each frame
prev_speed = 0
if self.prev_pts is None:
test_preds[frame_idx] = 0
else:
# get median of predicted V/hf values
preds = self.opticalflow(frame_gray)
prev_speed = np.median(preds) * hf_factor if len(preds) else 0
test_preds[frame_idx] = prev_speed
# Extract features
self.prev_pts = self.features(frame_gray, mask[130:350, 35:605])
self.prev_gray = frame_gray
frame_idx += 1
frame_index.append(frame_idx)
            # for visualization
mask_vis = frame.copy() # <- For visualization
vis_pred_speed = computeAverage(
test_preds, self.window//2, frame_idx)
if self.camera:
prev_key_pts = self.visualize(
frame, mask_vis, prev_key_pts, speed=vis_pred_speed)
if self.predict:
with open("test.txt", "w") as file_:
for item in test_preds:
file_.write("%s \n" % item)
            print('Predicted values were successfully saved to the test.txt file in the current directory')
if __name__ == '__main__':
video_train_path = 'data/train.mp4'
text_train_path = 'data/train.txt'
video_test_path = 'data/test.mp4'
speedcar = Speed_Car(video_train_path, text_train_path, video_test_path)
hf = speedcar.run()
speedcar.test(hf, True)
print(speedcar.mse_train)
print(speedcar.mse_test)
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1975733
|
<reponame>zainhussaini/salat
from time import daylight
import salat
import datetime as dt
import math
import pytz
KAABAH_LONG_LAT = (39.8262, 21.4225)
EPOCH = dt.date(2000, 1, 1)
TIMEZONES = [
dt.timezone.utc,
dt.timezone(dt.timedelta(), "UTC"),
dt.timezone(dt.timedelta(hours=3), "AST"),
dt.timezone(dt.timedelta(hours=-5), "EST"),
pytz.timezone("US/Eastern"),
]
def time_close(time1: dt.datetime, time2: dt.datetime, delta: dt.timedelta) -> bool:
"""Checks that two datetimes are within delta of each other
Args:
time1 (dt.datetime): first datetime
        time2 (dt.datetime): second datetime
        delta (dt.timedelta): maximum allowed absolute difference
Returns:
bool: True if both datetimes are within delta of each other
"""
if not math.isclose((time1 - time2).total_seconds(), 0, rel_tol=0, abs_tol=delta.total_seconds()):
print(time1)
print(time2)
print(time1 - time2)
return False
else:
return True
def parse_line(line: str, date: dt.date, timezone: dt.tzinfo):
# line = "05:53 AM 06:58 AM 12:24 PM 03:29 PM 05:50 PM 06:56 PM"
    # Split into whitespace-separated tokens and re-pair them as "HH:MM AM/PM" entries.
    tokens = line.split()
    times = ["{} {}".format(tokens[i], tokens[i + 1]) for i in range(0, len(tokens), 2)]
hour_minute = []
for time in times:
h, mp = time.split(":")
m, p = mp.split(" ")
if p.upper() == "AM":
hour = int(h)
if h == "12":
hour = 0
elif p.upper() == "PM":
hour = int(h) + 12
if h == "12":
hour = 12
minute = int(m)
hour_minute.append((hour, minute))
assert len(hour_minute) == 6
true_times = []
for hour, minute in hour_minute:
true_times.append(dt.datetime(date.year, date.month, date.day, hour, minute, tzinfo=timezone))
assert len(true_times) == 6
names = ["fajr", "sunrise", "dhuhr", "asr", "maghrib", "isha"]
return {names[i]: true_times[i] for i in range(6)}
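# For example, with the islamicfinder line quoted in the tests below, parse_line(line, EPOCH, tz)
# returns {"fajr": dt.datetime(2000, 1, 1, 5, 53, tzinfo=tz), ..., "isha": dt.datetime(2000, 1, 1, 18, 56, tzinfo=tz)}.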
def test_ISNA_Kaaba_epoch():
long, lat = KAABAH_LONG_LAT
calc_method = salat.CalculationMethod.ISNA
asr_method = salat.AsrMethod.STANDARD
date = EPOCH
for tz in TIMEZONES:
pt = salat.PrayerTimes(calc_method, asr_method)
times = pt.calc_times(date, tz, long, lat)
# https://www.islamicfinder.org/prayer-times/
line = "05:53 AM 06:58 AM 12:24 PM 03:29 PM 05:50 PM 06:56 PM"
timezone = dt.timezone(dt.timedelta(hours=3))
true_times = parse_line(line, date, timezone)
delta = dt.timedelta(minutes=1)
for name in true_times:
assert time_close(times[name], true_times[name], delta)
def test_MWL_Kaaba_epoch():
long, lat = KAABAH_LONG_LAT
calc_method = salat.CalculationMethod.MWL
asr_method = salat.AsrMethod.STANDARD
date = EPOCH
for tz in TIMEZONES:
pt = salat.PrayerTimes(calc_method, asr_method)
times = pt.calc_times(date, tz, long, lat)
# https://www.islamicfinder.org/prayer-times/
line = "05:40 AM 06:58 AM 12:24 PM 03:29 PM 05:50 PM 07:05 PM"
timezone = dt.timezone(dt.timedelta(hours=3))
true_times = parse_line(line, date, timezone)
delta = dt.timedelta(minutes=1)
for name in true_times:
assert time_close(times[name], true_times[name], delta)
def test_Makkah_Kaaba_epoch():
long, lat = KAABAH_LONG_LAT
calc_method = salat.CalculationMethod.MAKKAH
asr_method = salat.AsrMethod.STANDARD
date = EPOCH
for tz in TIMEZONES:
pt = salat.PrayerTimes(calc_method, asr_method)
times = pt.calc_times(date, tz, long, lat)
# https://www.islamicfinder.org/prayer-times/
line = "05:37 AM 06:58 AM 12:24 PM 03:29 PM 05:50 PM 07:50 PM"
timezone = dt.timezone(dt.timedelta(hours=3))
true_times = parse_line(line, date, timezone)
delta = dt.timedelta(minutes=1)
for name in true_times:
print(name)
assert time_close(times[name], true_times[name], delta)
def test_Makkah_Kaaba_Jan30_2000():
# after Ramadan
long, lat = KAABAH_LONG_LAT
calc_method = salat.CalculationMethod.MAKKAH
asr_method = salat.AsrMethod.STANDARD
date = dt.date(2000, 1, 30)
for tz in TIMEZONES:
pt = salat.PrayerTimes(calc_method, asr_method)
times = pt.calc_times(date, tz, long, lat)
# https://www.islamicfinder.org/prayer-times/
line = "05:41 AM 06:59 AM 12:34 PM 03:46 PM 06:09 PM 07:39 PM"
timezone = dt.timezone(dt.timedelta(hours=3))
true_times = parse_line(line, date, timezone)
delta = dt.timedelta(minutes=1)
for name in true_times:
assert time_close(times[name], true_times[name], delta)
def test_Jafari_Kaaba_epoch():
long, lat = KAABAH_LONG_LAT
calc_method = salat.CalculationMethod.JAFARI
asr_method = salat.AsrMethod.STANDARD
date = EPOCH
for tz in TIMEZONES:
pt = salat.PrayerTimes(calc_method, asr_method)
times = pt.calc_times(date, tz, long, lat)
# https://www.islamicfinder.org/prayer-times/
# Note: this website does not adjust Maghrib angle correctly
line = "05:49 AM 06:58 AM 12:24 PM 03:29 PM 05:50 PM 06:51 PM"
timezone = dt.timezone(dt.timedelta(hours=3))
true_times = parse_line(line, date, timezone)
delta = dt.timedelta(minutes=1)
for name in true_times:
if name != "maghrib":
assert time_close(times[name], true_times[name], delta)
# TODO:
# 1. check locations where sign if longitude and timezone offset are different (ie. long = -170, timezone= +12)
# 2. check daylight savings time transition points
# 3. check negative latitudes
# 4. check high latitudes
|
StarcoderdataPython
|
154895
|
<reponame>dbms-ctzs/sage
from django.http import HttpResponse
from django.shortcuts import redirect
# Restrict a view to unauthenticated users: anyone already logged in is redirected to "home"
def unauthenticated_user(view_func):
def wrapper_func(request,*args,**kwargs):
if request.user.is_authenticated:
return redirect('home')
else:
return view_func(request,*args,**kwargs)
return wrapper_func
# Only users in one of the allowed groups are authorized to view
def allowed_users(allowed_roles=[]):
def decorator(view_func):
def wrapper_func(request,*args,**kwargs):
group = None
if request.user.groups.exists():
group = request.user.groups.all()[0].name
if group in allowed_roles:
return view_func(request,*args,**kwargs)
else:
return HttpResponse("You are not authorized to view this page ")
return wrapper_func
return decorator
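# Illustrative usage (view and group names are made up):
#
# @unauthenticated_user
# def login_page(request):
#     ...  # reachable only while logged out; logged-in users are redirected to 'home'
#
# @allowed_users(allowed_roles=['admin'])
# def dashboard(request):
#     ...  # members of the 'admin' group see the view, everyone else gets the "not authorized" response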
'''def admin_only(view_func):
def wrapper_function(request,*args,**kwargs):
group=None
if request.user.groups.exists():
group=request.user.groups.all()[0].name
if group =='user':
return redirect('user-page')
if group == 'admin':
return view_func(request,*args,**kwargs)
return wrapper_function '''
|
StarcoderdataPython
|
8169102
|
<reponame>sky-dust-intelligence-bv/nni
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import itertools
from typing import Any, Dict, List, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from nni.algorithms.compression.v2.pytorch.base import Pruner
from nni.compression.pytorch.utils.shape_dependency import ChannelDependency, GroupDependency
from .base import SparsityAllocator
class NormalSparsityAllocator(SparsityAllocator):
"""
    This allocator simply prunes the weights with the smaller metric values at layer level.
"""
def generate_sparsity(self, metrics: Dict[str, Tensor]) -> Dict[str, Dict[str, Tensor]]:
masks = {}
for name, wrapper in self.pruner.get_modules_wrapper().items():
sparsity_rate = wrapper.config['total_sparsity']
assert name in metrics, 'Metric of {} is not calculated.'.format(name)
# We assume the metric value are all positive right now.
metric = metrics[name]
if self.continuous_mask:
metric *= self._compress_mask(wrapper.weight_mask) # type: ignore
prune_num = int(sparsity_rate * metric.numel())
if prune_num == 0:
threshold = metric.min() - 1
else:
threshold = torch.topk(metric.view(-1), prune_num, largest=False)[0].max()
mask = torch.gt(metric, threshold).type_as(metric)
masks[name] = self._expand_mask(name, mask)
if self.continuous_mask:
masks[name]['weight'] *= wrapper.weight_mask
return masks
class BankSparsityAllocator(SparsityAllocator):
"""
    In the bank pruner, all weight values are divided into sub-blocks whose shape is aligned
    with balance_gran. Each sub-block is pruned to the same sparsity, equal to the overall
    sparsity, so this allocator prunes the weight at block granularity.
"""
def __init__(self, pruner: Pruner, balance_gran: list):
super().__init__(pruner)
self.balance_gran = balance_gran
for gran in self.balance_gran:
assert isinstance(gran, int) and gran > 0, 'All values in list balance_gran \
should be type int and bigger than zero'
def generate_sparsity(self, metrics: Dict[str, Tensor]) -> Dict[str, Dict[str, Tensor]]:
masks = {}
for name, wrapper in self.pruner.get_modules_wrapper().items():
sparsity_rate = wrapper.config['total_sparsity']
assert name in metrics, 'Metric of {} is not calculated.'.format(name)
# We assume the metric value are all positive right now.
metric = metrics[name]
if self.continuous_mask:
metric *= self._compress_mask(wrapper.weight_mask) # type: ignore
n_dim = len(metric.shape)
assert n_dim >= len(self.balance_gran), 'Dimension of balance_gran should be smaller than metric'
# make up for balance_gran
balance_gran = [1] * (n_dim - len(self.balance_gran)) + self.balance_gran
for i, j in zip(metric.shape, balance_gran):
assert i % j == 0, 'Length of {} weight is not aligned with balance granularity'.format(name)
mask = torch.zeros(metric.shape).type_as(metric)
loop_iters = [range(int(i / j)) for i, j in zip(metric.shape, balance_gran)]
for iter_params in itertools.product(*loop_iters):
index_str_list = [f"{iter_param * gran}:{(iter_param+1) * gran}"\
for iter_param, gran in zip(iter_params, balance_gran)]
index_str = ",".join(index_str_list)
sub_metric_str = "metric[{}]".format(index_str)
sub_mask_str = "mask[{}] = mask_bank".format(index_str)
metric_bank = eval(sub_metric_str)
prune_num = int(sparsity_rate * metric_bank.numel())
if prune_num == 0:
threshold = metric_bank.min() -1
else:
threshold = torch.topk(metric_bank.reshape(-1), prune_num, largest=False)[0].max()
# mask_bank will be used in exec(sub_mask_str)
mask_bank = torch.gt(metric_bank, threshold).type_as(metric_bank)
exec(sub_mask_str)
masks[name] = self._expand_mask(name, mask)
if self.continuous_mask:
masks[name]['weight'] *= wrapper.weight_mask
return masks
class GlobalSparsityAllocator(SparsityAllocator):
"""
    This allocator prunes the weights with the smaller metric values at group level.
    This means the metrics of all layers in a group are sorted together.
    The layers sharing the same config in config_list form a group.
"""
def generate_sparsity(self, metrics: Dict) -> Dict[str, Dict[str, Tensor]]:
masks = {}
# {group_index: {layer_name: metric}}
grouped_metrics = {idx: {name: metrics[name] for name in names}
for idx, names in self.pruner.generate_module_groups().items()}
for _, group_metric_dict in grouped_metrics.items():
threshold, sub_thresholds = self._calculate_threshold(group_metric_dict)
for name, metric in group_metric_dict.items():
mask = torch.gt(metric, min(threshold, sub_thresholds[name])).type_as(metric)
masks[name] = self._expand_mask(name, mask)
if self.continuous_mask:
masks[name]['weight'] *= self.pruner.get_modules_wrapper()[name].weight_mask
return masks
def _calculate_threshold(self, group_metric_dict: Dict[str, Tensor]) -> Tuple[float, Dict[str, float]]:
metric_list = []
sub_thresholds = {}
total_weight_num = 0
temp_wrapper_config = self.pruner.get_modules_wrapper()[list(group_metric_dict.keys())[0]].config
total_sparsity = temp_wrapper_config['total_sparsity']
max_sparsity_per_layer = temp_wrapper_config.get('max_sparsity_per_layer', {})
for name, metric in group_metric_dict.items():
wrapper = self.pruner.get_modules_wrapper()[name]
# We assume the metric value are all positive right now.
if self.continuous_mask:
metric = metric * self._compress_mask(wrapper.weight_mask) # type: ignore
layer_weight_num = wrapper.weight.data.numel() # type: ignore
total_weight_num += layer_weight_num
expend_times = int(layer_weight_num / metric.numel())
retention_ratio = 1 - max_sparsity_per_layer.get(name, 1)
retention_numel = math.ceil(retention_ratio * layer_weight_num)
removed_metric_num = math.ceil(retention_numel / (wrapper.weight_mask.numel() / metric.numel())) # type: ignore
stay_metric_num = metric.numel() - removed_metric_num
if stay_metric_num <= 0:
sub_thresholds[name] = metric.min().item() - 1
continue
# Remove the weight parts that must be left
stay_metric = torch.topk(metric.view(-1), stay_metric_num, largest=False)[0]
sub_thresholds[name] = stay_metric.max()
if expend_times > 1:
stay_metric = stay_metric.expand(int(layer_weight_num / metric.numel()), stay_metric_num).contiguous().view(-1)
metric_list.append(stay_metric)
total_prune_num = int(total_sparsity * total_weight_num)
if total_prune_num == 0:
threshold = torch.cat(metric_list).min().item() - 1
else:
threshold = torch.topk(torch.cat(metric_list).view(-1), total_prune_num, largest=False)[0].max().item()
return threshold, sub_thresholds
class Conv2dDependencyAwareAllocator(SparsityAllocator):
"""
    An allocator specific to Conv2d layers, with dependency awareness.
"""
def __init__(self, pruner: Pruner, dim: int, dummy_input: Any):
assert isinstance(dim, int), 'Only support single dim in Conv2dDependencyAwareAllocator.'
super().__init__(pruner, dim=dim)
self.dummy_input = dummy_input
def _get_dependency(self):
graph = self.pruner.generate_graph(dummy_input=self.dummy_input)
self.pruner._unwrap_model()
self.channel_depen = ChannelDependency(model=self.pruner.bound_model, dummy_input=self.dummy_input, traced_model=graph.trace).dependency_sets
self.group_depen = GroupDependency(model=self.pruner.bound_model, dummy_input=self.dummy_input, traced_model=graph.trace).dependency_sets
self.pruner._wrap_model()
def generate_sparsity(self, metrics: Dict) -> Dict[str, Dict[str, Tensor]]:
self._get_dependency()
masks = {}
grouped_metrics = {}
grouped_names = set()
# combine metrics with channel dependence
for idx, names in enumerate(self.channel_depen):
grouped_metric = {name: metrics[name] for name in names if name in metrics}
grouped_names.update(grouped_metric.keys())
if self.continuous_mask:
for name, metric in grouped_metric.items():
metric *= self._compress_mask(self.pruner.get_modules_wrapper()[name].weight_mask) # type: ignore
if len(grouped_metric) > 0:
grouped_metrics[idx] = grouped_metric
# ungrouped metrics stand alone as a group
ungrouped_names = set(metrics.keys()).difference(grouped_names)
for name in ungrouped_names:
idx += 1 # type: ignore
grouped_metrics[idx] = {name: metrics[name]}
# generate masks
for _, group_metric_dict in grouped_metrics.items():
group_metric = self._group_metric_calculate(group_metric_dict)
sparsities = {name: self.pruner.get_modules_wrapper()[name].config['total_sparsity'] for name in group_metric_dict.keys()}
min_sparsity = min(sparsities.values())
# generate group mask
conv2d_groups, group_mask = [], []
for name in group_metric_dict.keys():
if name in self.group_depen:
conv2d_groups.append(self.group_depen[name])
else:
# a layer missing from group_depen is not a Conv2d layer; in that case assume a group number of 1
conv2d_groups.append(1)
max_conv2d_group = np.lcm.reduce(conv2d_groups)
pruned_per_conv2d_group = int(group_metric.numel() / max_conv2d_group * min_sparsity)
conv2d_group_step = int(group_metric.numel() / max_conv2d_group)
for gid in range(max_conv2d_group):
_start = gid * conv2d_group_step
_end = (gid + 1) * conv2d_group_step
if pruned_per_conv2d_group > 0:
threshold = torch.topk(group_metric[_start: _end], pruned_per_conv2d_group, largest=False)[0].max()
conv2d_group_mask = torch.gt(group_metric[_start:_end], threshold).type_as(group_metric)
else:
conv2d_group_mask = torch.ones(conv2d_group_step, device=group_metric.device)
group_mask.append(conv2d_group_mask)
group_mask = torch.cat(group_mask, dim=0)
# generate final mask
for name, metric in group_metric_dict.items():
# We assume the metric values are all positive right now.
metric = metric * group_mask
pruned_num = int(sparsities[name] * len(metric))
if pruned_num == 0:
threshold = metric.min() - 1
else:
threshold = torch.topk(metric, pruned_num, largest=False)[0].max()
mask = torch.gt(metric, threshold).type_as(metric)
masks[name] = self._expand_mask(name, mask)
if self.continuous_mask:
masks[name]['weight'] *= self.pruner.get_modules_wrapper()[name].weight_mask
return masks
def _group_metric_calculate(self, group_metrics: Union[Dict[str, Tensor], List[Tensor]]) -> Tensor:
"""
Sum the metric values at the same position across all layers in one group.
"""
group_metrics = list(group_metrics.values()) if isinstance(group_metrics, dict) else group_metrics
assert all(group_metrics[0].size() == group_metric.size() for group_metric in group_metrics), 'Metric sizes do not match.'
group_sum_metric = torch.zeros(group_metrics[0].size(), device=group_metrics[0].device)
for group_metric in group_metrics:
group_sum_metric += group_metric
return group_sum_metric
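# --- Illustrative sketch (not part of the original allocator; assumes the module's torch/numpy imports) ---
# How the Conv2d group handling above treats grouped convolutions: the conv2d
# group numbers of all layers in a dependency set are combined with the least
# common multiple, and pruning is done per LCM-sized chunk of the summed metric.
# The group numbers, metric and sparsity below are hypothetical.
def _demo_conv2d_group_chunks():
    conv2d_groups = [2, 4]                              # e.g. layers with groups=2 and groups=4
    max_conv2d_group = np.lcm.reduce(conv2d_groups)     # -> 4
    group_metric = torch.arange(8, dtype=torch.float)   # summed metric over the dependency set
    step = group_metric.numel() // max_conv2d_group     # 2 metric entries per chunk
    pruned_per_chunk = int(step * 0.5)                  # min_sparsity of 0.5 -> prune 1 per chunk
    chunks = []
    for gid in range(max_conv2d_group):
        chunk = group_metric[gid * step:(gid + 1) * step]
        threshold = torch.topk(chunk, pruned_per_chunk, largest=False)[0].max()
        chunks.append(torch.gt(chunk, threshold).type_as(chunk))
    return torch.cat(chunks)  # -> tensor([0., 1., 0., 1., 0., 1., 0., 1.])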
|
StarcoderdataPython
|
5168421
|
import json
import datetime
import boto3
def lambda_handler(event, context):
ec2 = boto3.resource('ec2')
# create a new EC2 instance
instances = ec2.create_instances(
ImageId='ami-09d95fab7fff3776c',
MinCount=1,
MaxCount=1,
InstanceType='t2.micro',
KeyName='aarpbitbucketpem'
)
return "Success"
|
StarcoderdataPython
|
1717946
|
<filename>cs220/fall_2015/code_examples/trapezoidal_rule/trap.py<gh_stars>1-10
# File: trap.py
# Purpose: Calculate area using trapezoidal rule.
#
# Input: a, b, n
# Output: estimate of area between x-axis, x = a, x = b, and graph of f(x)
# using n trapezoids.
#
# Usage: python trap.py
#
# Note: The function f(x) is hardwired.
from sys import stdin
# Function: Trap
# Purpose: Estimate area using the trapezoidal rule
# Input args: a: left endpoint (double)
# b: right endpoint (double)
# n: number of trapezoids (int)
# h: stepsize = length of base of trapezoids (double)
# Return val: Trapezoidal rule estimate of area between x-axis,
# x = a, x = b, and graph of f(x) (double)
def Trap(a, b, n, h):
area = (f(a) + f(b))/2.0
for i in range(1, n):
x = a + i*h
area = area + f(x)
area = area*h
return area
# End of Trap
# Function: f
# Purpose: Compute value of function to be integrated
# Input args: x (double)
# Return val: f(x) (double)
def f(x):
return_val = x*x + 1
return return_val
# End of f
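# Worked example (illustrative, not in the original file): for f(x) = x*x + 1
# on [0, 1] the exact area is 1/3 + 1 = 4/3 ~ 1.333333, and
# Trap(0.0, 1.0, 100, 0.01) returns about 1.333350, so the trapezoidal
# estimate converges toward the exact value as n grows.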
# main function
print "Enter a, b, and n"
line = stdin.readline()
strings = line.split()
a = float(strings[0])
b = float(strings[1])
n = int(strings[2])
h = (b-a)/n
area = Trap(a, b, n, h)
print "With n =", n, "trapezoids, our estimate"
print "of the area from", a, "to", b, "= %.15f" % area
# End of main
|
StarcoderdataPython
|
11353286
|
from abc import ABC, abstractmethod
from datetime import datetime
class ECGController(ABC):
def __init__(self, dir_name, file_name, file_list):
self.dir_name = dir_name
self.file_name = file_name
self.file_list = file_list
@property
def full_name(self):
return f"{self.dir_name} {self.file_name}"
@property
def current_date(self):
return datetime.now()
@abstractmethod
def get_source_property(self):
pass
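# --- Illustrative sketch (not part of the original module) ---
# ECGController is abstract, so it cannot be instantiated directly; a concrete
# controller only has to implement get_source_property(). The subclass name
# and return value below are hypothetical.
class DemoECGController(ECGController):
    def get_source_property(self):
        return f"{self.full_name} read at {self.current_date}"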
|
StarcoderdataPython
|
9628449
|
import logging
import traceback
from queue import SimpleQueue
from confluent_kafka.avro.serializer import SerializerError
# from confluent_kafka.avro import AvroConsumer
import json
from confluent_kafka import KafkaError, Consumer as KafkaConsumer
class Consumer:
def __init__(self, broker, schema_registry, topic, logging_enabled = False, groupId = "asgardConsumerGroup", autocommit = True):
"""
Initialiser for the Confluent Kafka Consumer (the AvroConsumer variant is kept commented out below).
Each consumer can only be subscribed to one topic.
Parameters
----------
broker: str
The URL of the broker (example: 'localhost:9092')
schema_registry: str
The URL of the confluent Schema Registry endpoint (example: 'http://localhost:8081')
topic: str
The topic to subscribe to
logging_enabled: bool, Optional
Whether to attach a logger to this consumer; if False, messages are printed instead
groupId: str, Optional
An optional group id which can be used to load balance consumers; default is "asgardConsumerGroup"
autocommit: bool, Optional
Whether message offsets are committed automatically; default is True
"""
"""self.__consumer = AvroConsumer(
{
"bootstrap.servers": broker,
"group.id": groupId,
"schema.registry.url": schema_registry,
"enable.auto.commit": autocommit
}
)"""
self.__consumer = KafkaConsumer(
{
"bootstrap.servers": broker,
"group.id": groupId,
"enable.auto.commit": autocommit,
"auto.offset.reset": "latest"
}
)
self.autocommit = autocommit
if not autocommit:
self.consumed_messages= SimpleQueue()
self.__consumer.subscribe([topic])
if logging_enabled:
self.logger = logging.getLogger(__name__)
else:
self.logger = None
def consume(self):
"""
Method to consume and return message if exists and can be deserialized
Returns
-------
str
The received message payload as a string
None
No message has been received or an error has occurred
"""
msg = None
try:
msg = self.__consumer.poll(1)
except SerializerError as e:
self.__log_msg(
"Message deserialization has failed {}: {}".format(msg,e),
"See the following stack trace",
f"{traceback.format_exc()}",
delimeter="\n",
level="ERROR")
except RuntimeError as e:
self.__log_msg(
"The consumer has been closed and cannot recieve messages",
level = "ERROR"
)
except Exception as e:
self.__log_msg(
"An unkown error has occured {}".format(e),
"See the following stack trace",
f"{traceback.format_exc()}",
delimeter="\n",
level= "ERROR"
)
if msg is not None:
if msg.error():
self.__log_msg(
"AvroConsumer error: {}".format(msg.error()),
level="ERROR"
)
else:
if not self.autocommit:
self.consumed_messages.put_nowait(
msg
)
return json.loads(msg.value().decode()).get("payload")
def __enter__(self):
return self.__consumer
def __exit__(self, *args):
self.close()
def __log_msg(self, *messages, level="NOTSET", delimeter=" "):
levels = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"NOTSET": logging.NOTSET
}
msg = delimeter.join(messages)
if self.logger is not None:
if level not in levels:
raise ValueError(
f"level {level} is not valid must be one of {list(levels.keys())}"
)
self.logger.log(
levels[level],
msg
)
else:
if level == "NOTSET":
print(f"LOGGED MESSAGE: {msg}")
else:
print(f"{level}: {msg}")
def commit(self, asynchronous = True):
if not self.autocommit and not self.consumed_messages.empty():
msg = self.consumed_messages.get_nowait()
self.__consumer.commit(
msg, asynchronous = asynchronous
)
def close(self):
"""
Close the consumer, Once called this object cannot be reused
"""
self.__consumer.close()
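# --- Illustrative usage sketch (not part of the original module) ---
# Broker address, schema registry URL, topic and group id below are hypothetical.
if __name__ == "__main__":
    consumer = Consumer("localhost:9092", "http://localhost:8081",
                        "example-topic", logging_enabled=True,
                        groupId="exampleGroup", autocommit=True)
    try:
        while True:
            payload = consumer.consume()
            if payload is not None:
                print(payload)
    finally:
        consumer.close()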
|
StarcoderdataPython
|
9708282
|
price_list: dict = {
'coffee':{
'Sofia': 0.5,
'Plovdiv': 0.4,
'Varna': 0.45,
},
'water':{
'Sofia': 0.8,
'Plovdiv': 0.7,
'Varna': 0.7,
},
'beer':{
'Sofia': 1.2,
'Plovdiv': 1.15,
'Varna': 1.1,
},
'sweets':{
'Sofia': 1.45,
'Plovdiv': 1.3,
'Varna': 1.35,
},
'peanuts':{
'Sofia': 1.6,
'Plovdiv': 1.5,
'Varna': 1.55,
},
}
print(price_list[input()][input()] * float(input()))
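# Worked example (illustrative): inputs "coffee", "Varna", "2" print 0.45 * 2.0 = 0.9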
|
StarcoderdataPython
|
6659827
|
#!/usr/bin/env python3
import os
import sys
import time
import zmq
# _______
# |== []|
# | ==== | *letterbox*
# '-------'
#
# timer - timer and reminder services
# written by <NAME>, 2019-2020 (jclemme at my dot uri dot edu)
|
StarcoderdataPython
|
1893011
|
<reponame>jobevers/vex
import sys
class InvalidArgument(Exception):
"""Raised by anything under main() to propagate errors to user.
"""
def __init__(self, message):
self.message = message
Exception.__init__(self, message)
class NoVirtualenvName(InvalidArgument):
"""No virtualenv name was given (insufficient arguments).
"""
pass
class NoVirtualenvsDirectory(InvalidArgument):
"""There is no directory to find named virtualenvs in.
"""
pass
class OtherShell(InvalidArgument):
"""The given argument to --shell-config is not recognized.
"""
pass
class UnknownArguments(InvalidArgument):
"""Unknown arguments were given on the command line.
This is a byproduct of having to use parse_known_args.
"""
pass
class InvalidVexrc(InvalidArgument):
"""config file specified or required but absent or unparseable.
"""
pass
class InvalidVirtualenv(InvalidArgument):
"""No usable virtualenv was found.
"""
pass
class InvalidCommand(InvalidArgument):
"""No runnable command was found.
"""
pass
class InvalidCwd(InvalidArgument):
"""cwd specified or required but unusable.
"""
pass
class BadConfig(InvalidArgument):
"""raised to halt on fatal conditions on the way to run.
"""
pass
class VirtualenvAlreadyMade(InvalidArgument):
"""could not make virtualenv as one already existed.
"""
pass
class VirtualenvNotMade(InvalidArgument):
"""could not make virtualenv.
"""
pass
class VirtualenvNotRemoved(InvalidArgument):
"""raised when virtualenv could not be removed.
"""
pass
if sys.version_info > (3, 3):
CommandNotFoundError = FileNotFoundError
else:
CommandNotFoundError = OSError
|
StarcoderdataPython
|
5097570
|
import pandas as pd
cols_to_keep = ['name', 'a', 'e', 'i', 'om', 'w', 'q', 'ad', 'per_y', 'data_arc',
'condition_code', 'n_obs_used', 'H', 'neo', 'pha', 'diameter',
'albedo', 'rot_per', 'moid', 'class', 'n', 'per', 'ma']
def clean(main_csv):
df = pd.read_csv(main_csv, header=0, usecols=cols_to_keep)
df.drop(df[df['a'] < 0].index, inplace=True)  # drop rows with a negative semi-major axis
au_array = ['a', 'q', 'ad', 'moid']  # columns given in astronomical units
conv_factor = 1.496e8  # kilometres per astronomical unit
for i in au_array:
df[i] = conv_factor * df[i]
df.to_csv("Asteroid_au_to_km.csv")
if __name__ == '__main__':
clean("Asteroid_Updated.csv")
|
StarcoderdataPython
|
156783
|
<reponame>ulgltas/ModalSolver
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# test encoding: à-é-è-ô-ï-€
#
# run script for Modal Solver
def createWdir():
import os
wdir = os.path.join(os.getcwd(), 'workspace')
if not os.path.isdir(wdir):
print("creating", wdir)
os.makedirs(wdir)
os.chdir(wdir)
def parseargs():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--nogui", help="disable any graphical output", action="store_true")
parser.add_argument('file', nargs='*', help='python file')
return parser.parse_args()
if __name__ == "__main__":
import sys, os
# adds "." to the pythonpath
thisdir = os.path.split(__file__)[0]
sys.path.append(thisdir)
# parse arguments
args = parseargs()
# get test
testname = os.path.abspath(args.file[0])
if not os.path.isfile(testname):
raise Exception("file not found: %s" % testname)
__file__ = testname
# setup workspace and start the test
createWdir()
import time, platform
print('*' * 80)
print("starting test", testname)
print("time:", time.strftime("%c"))
print("hostname:", platform.node())
print('*' * 80)
print('* modali')
print('* <NAME> & <NAME>')
print('* ULiege, 2018-2019')
print('* Distributed under Apache license 2.0')
print('*' * 80)
exec(open(testname).read())
|
StarcoderdataPython
|
12857255
|
import numpy as np
from tqdm import tqdm
from scipy.sparse import csr_matrix, hstack, vstack
from sklearn.neighbors import NearestNeighbors
class MFKnn(object):
"""
Implementation of
"""
def __init__(self, metric, k):
self.k = k
self.metric = metric
def fit(self, X, y):
#
self.X_train = X
self.y_train = y
#
self.classes = sorted(map(int, list(set(self.y_train))))
self.n_classes = len(self.classes)
#
self.docs_by_class = [len(np.where(self.y_train == i)[0]) for i in self.classes]
#
self.X_by_class = []
self.knn_by_class = []
#self.scores = {}
#
njobs=-1
if self.metric == 'l1':
njobs=1
for i in self.classes:
X_tmp = self.X_train[np.where(self.y_train == i)]
#print ("xtmp"+str(X_tmp.shape[0])+" class: "+str(i))
data=[]
data.append(0)
ind=[]
ind.append(0)
auxf=csr_matrix((data, (ind,ind)), shape=(1,self.X_train.shape[1]),dtype=np.float64) # all-zero padding row
if X_tmp.shape[0]<self.k+1:
newxtmp=[]
for iww in list(range(X_tmp.shape[0])):
newxtmp.append(X_tmp[iww])
for iww in list(range(self.k+1-X_tmp.shape[0])):
newxtmp.append(auxf)
X_tmp=vstack(newxtmp)
knn = NearestNeighbors(n_neighbors=self.k+1, algorithm="brute", metric=self.metric, n_jobs=njobs)
knn.fit(X_tmp)
self.knn_by_class.append(knn)
return self
def csr_matrix_equal2(self, a1, a2):
return all((np.array_equal(a1.indptr, a2.indptr),
np.array_equal(a1.indices, a2.indices),
np.array_equal(a1.data, a2.data)))
def transform(self, X):
#
istrain = True if self.csr_matrix_equal2(self.X_train, X) else False
#print(istrain)
n_neighbors = self.k+1 if istrain else self.k
metafeatures = []
scores = {}
for j in self.classes:
if self.metric == "l1" or self.metric == "l2":
scores[j] = 0.0 + self.knn_by_class[j].kneighbors(X, n_neighbors, return_distance=True)[0]
if self.metric == "cosine":
scores[j] = 1.0 - self.knn_by_class[j].kneighbors(X, n_neighbors, return_distance=True)[0]
#
for i, doc in enumerate(X):
for j in self.classes:
if istrain:
if self.y_train[i] == j:
metafeatures += list(scores[j][i][1:])
else:
metafeatures += list(scores[j][i][:-1])
else:
metafeatures += list(scores[j][i])
return np.array(metafeatures).reshape((X.shape[0],self.k*self.n_classes))
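# --- Illustrative usage sketch (not part of the original module) ---
# MFKnn fits one NearestNeighbors index per class and, for every document,
# emits its k nearest distances (or cosine similarities) to each class as
# meta-features, so transform() returns shape (n_samples, k * n_classes).
# The data below is synthetic.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = csr_matrix(rng.rand(20, 50))
    y = np.array([0] * 10 + [1] * 10)
    mf = MFKnn(metric="cosine", k=3).fit(X, y)
    print(mf.transform(X).shape)  # (20, 6) == (n_samples, k * n_classes)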
|
StarcoderdataPython
|
6509055
|
<reponame>lsandov1/arm-qa-tools
#!/usr/bin/env python3
__copyright__ = """
/*
* Copyright (c) 2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
"""
""" tfa_generate_influxdb_files.py:
Parses the TF-A metrics summary files and generates JSON files
containing data to be written to InfluxDB.
Usage: python3 tfa_generate_influxdb_files.py --defectLog <defect log> \
--complexityLog <complexity log> --loc <code churn loc> \
--gitTagDate <tag date> --influxTime <git tag date & time>
"""
import argparse
import os
import re
import collections
import string
import time
import json
def load_module(name, fpath):
"""
Function to return access to the module
:param: name: Module name to be loaded
:param: fpath: Relative path to complexity_parser.py
:return: Module object
"""
import os
import imp
return imp.load_source(
name, os.path.join(
os.path.dirname(__file__), fpath))
load_module(
"complexity_parser",
"../common_metrics/complexity_parser/complexity_parser.py")
from complexity_parser import ComplexityParser
def args_parse():
global DEFECT_LOG
global COMPLEXITY_LOG
global CODE_CHURN
global BASE_RELEASE_TAG
global TARGET_RELEASE_TAG
global GIT_TAG_DATE
global GIT_TAG_DATE_TIME
# Create parser instance and add arguments
parser = argparse.ArgumentParser(
description="TF-A quality metrics InfluxDB JSON files generator")
parser.add_argument("--defectLog", help="name of the defect log")
parser.add_argument("--complexityLog", help="name of the complexity log")
parser.add_argument("--loc", help="code churn statistics", required=True)
parser.add_argument(
"--baseTag",
help="name of the base release tag",
required=True)
parser.add_argument(
"--targetTag",
help="name of the target release tag",
required=True)
parser.add_argument("--gitTagDate", help="Git Tag Date", required=True)
parser.add_argument(
"--influxTime",
help="InfluxDB time, which is Git Tag Date and Time",
required=True)
# Parse the arguments
args = parser.parse_args()
if args.defectLog:
DEFECT_LOG = args.defectLog
if args.complexityLog:
COMPLEXITY_LOG = args.complexityLog
if args.loc:
CODE_CHURN = args.loc
if args.baseTag:
BASE_RELEASE_TAG = args.baseTag
if args.targetTag:
TARGET_RELEASE_TAG = args.targetTag
if args.gitTagDate:
GIT_TAG_DATE = re.sub('[-]', '', args.gitTagDate)
if args.influxTime:
GIT_TAG_DATE_TIME = args.influxTime
def tfa_generate_defect_data(data):
"""
Function to write the data of defects into influxdb """
dict_list = []
runDate = time.strftime('%H:%M-%x')
# "Issue_Status" acts as an indicative field to help the viewer figure out
# the current status of the bug
defects_tracking = {
"metadata": {
"metrics": "tfa_defects_tracking"
},
"api_version": "1.0",
"data": [{
"measurement": "TFA_Defects_Tracking",
"fields": {
"Issue_Status": "{}".format("Open"),
"Number_of_Defects": int(len(data))
},
"tags": {
"Measured_Date": "{}".format(runDate)
},
}]
}
with open('defects_tracking.json', 'w') as fp:
json.dump(defects_tracking, fp)
# Write details of each defects into the other measurement called
# "TFA_Defects_Statistics"
defect_stats = {}
defect_stats["data"] = []
defect_stats["metadata"] = {}
defect_stats["metadata"]["metrics"] = "tfa_defects_stats"
defect_stats["api_version"] = "1.0"
for ID, description in data.items():
json_body = {
"measurement": "TFA_Defects_Statistics",
"fields": {
"Title": "{}".format(description['title']),
"Issue_Status": "{}".format(description['state']),
"URL": "{}".format(description['url'])
},
"tags": {
"Defect_ID": "{}".format(ID),
"Measured_Date": "{}".format(runDate)
}
}
defect_stats["data"].append(json_body)
with open('defects_statistics.json', 'w') as fp:
json.dump(defect_stats, fp)
def tfa_generate_codechurn_data(data, base_tag, target_tag):
"""
Generates InfluxDB data for TF-A code churn and
writes that to code_churn.json file.
:param: data: Lines of change
:param: base_tag: Release tag prior to target_tag
:param: target_tag: Tag being tested
"""
json_body = {
"metadata": {
"metrics": "tfa_code_churn"
},
"api_version": "1.0",
"data": [{
"measurement": "TFA_CodeChurn_Tracking",
"fields": {
"Lines_of_Change": int(data)
},
"tags": {
"Git_Tag_Date": int(GIT_TAG_DATE),
"Base_Tag": "{}".format(base_tag),
"Target_Tag": "{}".format(target_tag)
},
"time": GIT_TAG_DATE_TIME
}]
}
with open('code_churn.json', 'w') as fp:
json.dump(json_body, fp)
def tfa_generate_complexity_data(data, base_tag, target_tag, threshold):
"""
Generates InfluxDB data for TF-A complexity scores and
writes that to complexity stats and tracking json files.
:param: data: Complexity data
:param: base_tag: Release tag prior to target_tag
:param: target_tag: Tag being tested
:param: threshold: Complexity threshold
"""
complexity_stats = {}
complexity_stats["data"] = []
complexity_stats["metadata"] = {}
complexity_stats["metadata"]["metrics"] = "tfa_complexity_stats"
complexity_stats["api_version"] = "1.0"
totalComplexity = 0
print(
"@@ Number of functions with complexity score > %d: %d" %
(threshold, len(data)))
for k, v in data.items():
# Extract the location and function name
location = k.split(':', 1)[0].strip()
functionID = k.split(':', 1)[1].strip()
json_body = {
"measurement": "TFA_Complexity_Statistics",
"fields": {
"Function_ID": "{}".format(functionID),
"Score": int(v),
"Whitelisted": "{}".format("no"),
"Threshold": int(threshold)
},
"tags": {
"Location": "{}".format(location),
"Git_Tag_Date": int(GIT_TAG_DATE),
"Base_Tag": "{}".format(base_tag),
"Target_Tag": "{}".format(target_tag)
},
"time": GIT_TAG_DATE_TIME
}
complexity_stats["data"].append(json_body)
totalComplexity += int(v)
with open('complexity_stats.json', 'w') as fp:
json.dump(complexity_stats, fp)
totalExceedThreshold = len(data)
complexity_tracking = {
"metadata": {
"metrics": "tfa_complexity_tracking"
},
"api_version": "1.0",
"data": [{
"measurement": "TFA_Complexity_Tracking",
"fields": {
"Threshold": int(threshold),
"Whitelisted": "{}".format("no"),
"Functions_Exceeding_Threshold_Not_Whitelisted": int(totalExceedThreshold)
},
"tags": {
"Git_Tag_Date": int(GIT_TAG_DATE),
"Target_Tag": "{}".format(target_tag)
},
"time": GIT_TAG_DATE_TIME
}]
}
with open('complexity_tracking.json', 'w') as fp:
json.dump(complexity_tracking, fp)
class DefectParser(object):
"""
Extract the following data from the defect/complexity logs:
- defect list: {test class ID:{title: <title>, link: <URL>}}
- int variable: total number of defects
"""
def __init__(self, defectLog):
self.defectLog = defectLog
self.defectDict = collections.OrderedDict()
self.process_defect_log()
def process_defect_log(self):
"""
Function to process defect log and populate the defect dictionary
"""
with open(self.defectLog) as fp:
content = fp.readlines()
baseURL = "https://github.com/ARM-software/tf-issues/issues/"
# Get defect id, title and URL link to populate the defect dictionary
for i in content:
i_strip = i.strip()
titleIDRegex = "^Found open bug with id: ([0-9]+): (.*)"
mIDTitle = re.match(titleIDRegex, i)
if mIDTitle:
defectID = mIDTitle.group(1)
defectTitle = mIDTitle.group(2)
defectURL = baseURL + mIDTitle.group(1)
self.defectDict[defectID] = {}
self.defectDict[defectID]['title'] = defectTitle.split(',')[0]
self.defectDict[defectID]['url'] = defectURL
self.defectDict[defectID]['state'] = defectTitle.split(',')[1]
if __name__ == "__main__":
# Initialise global variables
DEFECT_LOG = ""
COMPLEXITY_LOG = ""
CODE_CHURN = 0
BASE_RELEASE_TAG = 0
TARGET_RELEASE_TAG = 0
# Functions having pmccabe cyclomatic complexity >= TFA_THRESHOLD
# are reported
TFA_THRESHOLD = 11
GIT_TAG_DATE = ""
# parse arguments
args_parse()
# Generate defect data
defectData = DefectParser(DEFECT_LOG)
# Generate complexity data
complexityData = ComplexityParser(COMPLEXITY_LOG, TFA_THRESHOLD)
tfa_generate_defect_data(defectData.defectDict)
tfa_generate_codechurn_data(
CODE_CHURN,
BASE_RELEASE_TAG,
TARGET_RELEASE_TAG)
tfa_generate_complexity_data(
complexityData.complexityDict,
BASE_RELEASE_TAG,
TARGET_RELEASE_TAG,
TFA_THRESHOLD)
|
StarcoderdataPython
|
54692
|
<filename>turq/editor.py<gh_stars>10-100
# pylint: disable=unused-argument
import base64
import hashlib
import html
import mimetypes
import os
import pkgutil
import posixpath
import socket
import socketserver
import string
import threading
import wsgiref.simple_server
import falcon
import werkzeug.formparser
import werkzeug.http
import turq.examples
from turq.util.http import guess_external_url
STATIC_PREFIX = '/static/'
def make_server(host, port, ipv6, password, mock_server):
editor = falcon.API(media_type='text/plain; charset=utf-8',
middleware=[CommonHeaders()])
# Microsoft Edge doesn't send ``Authorization: Digest`` to ``/``.
# Can be circumvented with ``/?``, but I think ``/editor`` is better.
editor.add_route('/editor', EditorResource(mock_server, password))
editor.add_route('/', RedirectResource())
editor.add_sink(static_file, STATIC_PREFIX)
editor.set_error_serializer(text_error_serializer)
return wsgiref.simple_server.make_server(
host, port, editor,
IPv6EditorServer if ipv6 else EditorServer,
EditorHandler)
def text_error_serializer(req, resp, exc):
resp.body = exc.title
class EditorServer(socketserver.ThreadingMixIn,
wsgiref.simple_server.WSGIServer):
address_family = socket.AF_INET
allow_reuse_address = True
daemon_threads = True
def handle_error(self, request, client_address):
# Do not print tracebacks.
pass
class IPv6EditorServer(EditorServer):
address_family = socket.AF_INET6
class EditorHandler(wsgiref.simple_server.WSGIRequestHandler):
def log_message(self, *args): # Do not log requests and responses.
pass
class EditorResource:
realm = 'Turq editor'
template = string.Template(
pkgutil.get_data('turq', 'editor/editor.html.tpl').decode('utf-8'))
def __init__(self, mock_server, password):
self.mock_server = mock_server
self.password = password
self.nonce = self.new_nonce()
self._lock = threading.Lock()
def on_get(self, req, resp):
self.check_auth(req)
resp.content_type = 'text/html; charset=utf-8'
(mock_host, mock_port, *_) = self.mock_server.server_address
resp.body = self.template.substitute(
mock_host=html.escape(mock_host), mock_port=mock_port,
mock_url=html.escape(guess_external_url(mock_host, mock_port)),
rules=html.escape(self.mock_server.rules),
examples=turq.examples.load_html(initial_header_level=3))
def on_post(self, req, resp):
self.check_auth(req)
# Need `werkzeug.formparser` because JavaScript sends ``FormData``,
# which is encoded as multipart.
(_, form, _) = werkzeug.formparser.parse_form_data(req.env)
if 'rules' not in form:
raise falcon.HTTPBadRequest('Bad form')
try:
self.mock_server.install_rules(form['rules'])
except SyntaxError as exc:
resp.status = falcon.HTTP_422 # Unprocessable Entity
resp.body = str(exc)
else:
resp.status = falcon.HTTP_303 # See Other
resp.location = '/editor'
resp.body = 'Rules installed successfully.'
# We use HTTP digest authentication here, which provides a fairly high
# level of protection. We use only one-time nonces, so replay attacks
# should not be possible. An active man-in-the-middle could still intercept
# a request and substitute their own rules; the ``auth-int`` option
# is supposed to protect against that, but Chrome and Firefox (at least)
# don't seem to support it.
def check_auth(self, req):
if not self.password:
return
auth = werkzeug.http.parse_authorization_header(req.auth)
password_ok = False
if self.check_password(req, auth):
password_ok = True
with self._lock:
if auth.nonce == self.nonce:
self.nonce = self.new_nonce()
return
raise falcon.HTTPUnauthorized(headers={
'WWW-Authenticate':
'Digest realm="%s", qop="auth", charset=UTF-8, '
'nonce="%s", stale=%s' %
(self.realm, self.nonce, 'true' if password_ok else 'false')})
def check_password(self, req, auth):
if not auth:
return False
a1 = '%s:%s:%s' % (auth.username, self.realm, self.password)
a2 = '%s:%s' % (req.method, auth.uri)
response = self.h('%s:%s:%s:%s:%s:%s' % (self.h(a1),
auth.nonce, auth.nc,
auth.cnonce, auth.qop,
self.h(a2)))
return auth.response == response
@staticmethod
def h(s): # pylint: disable=invalid-name
return hashlib.md5(s.encode('utf-8')).hexdigest().lower()
@staticmethod
def new_nonce():
return base64.b64encode(os.urandom(18)).decode()
class RedirectResource:
def on_get(self, req, resp):
raise falcon.HTTPFound('/editor')
on_post = on_get
def static_file(req, resp):
path = '/' + req.path[len(STATIC_PREFIX):]
path = posixpath.normpath(path.replace('\\', '/')) # Avoid path traversal
try:
resp.data = pkgutil.get_data('turq', 'editor%s' % path)
except FileNotFoundError:
raise falcon.HTTPNotFound()
else:
(resp.content_type, _) = mimetypes.guess_type(path)
class CommonHeaders:
def process_response(self, req, resp, resource, req_succeeded):
# This server is very volatile: who knows what will be listening
# on this host and port tomorrow? So, disable caching completely.
# We don't want Chrome to "Show saved copy" when Turq is down, etc.
resp.cache_control = ['no-store']
# For some reason, under some circumstances, Internet Explorer 11
# falls back to IE 7 compatibility mode on the Turq editor.
resp.append_header('X-UA-Compatible', 'IE=edge')
|
StarcoderdataPython
|
1644192
|
#
# Copyright 2016 <NAME> for Puppet Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import subprocess
import os
from settings import Settings
class Utils:
@staticmethod
def docker_terminal(command=''):
settings = Settings()
# use terminal provided in settings file, otherwise detect
if settings.terminal_program:
shell = settings.terminal_program + " -e \"{command}\""
else:
p = platform.system()
if p == "Darwin":
# http://stackoverflow.com/questions/989349/running-a-command-in-a-new-mac-os-x-terminal-window
shell="osascript -e 'tell application \"Terminal\" to do script \"{command}\"'"
elif p == "Linux":
shell="xterm -e \"{command}\""
else:
raise("unsupported os " + p)
subprocess.Popen(shell.format(command=command), shell=True)
@staticmethod
def first_existing_file(files):
"""Return the name of the first file that exists in `files` or `False` if none can be Found"""
found = False
i = 0
while not found and i < len(files):
if os.path.isfile(files[i]):
found = files[i]
i += 1
return found
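# Illustrative usage (not part of the original class): Utils.first_existing_file(
# ['/etc/hosts', '/no/such/file']) returns '/etc/hosts' on a typical Linux
# system, and False when none of the candidate paths exist.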
|
StarcoderdataPython
|
212209
|
<reponame>ryankanno/vor
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import PhoneNumberProvider
class UsPhoneNumberProvider(PhoneNumberProvider):
def __init__(self, *args, **kwargs):
super(UsPhoneNumberProvider, self).__init__(*args, **kwargs)
def get_phone_number(self):
raise NotImplementedError # pragma: no cover
# vim: filetype=python
|
StarcoderdataPython
|