filename | text
---|---|
the-stack_0_22486 | import docker
from docker.errors import ContainerError
import logging
import pytest
LOGGER = logging.getLogger(__name__)
@pytest.mark.parametrize(
'language,version_output',
[
('python', ['Python', '3.8.6\n']),
('R', ['R', 'version', '3.6.3',],),
],
)
def test_languages(language, version_output):
"""Ensure that the language is available in the container's PATH and that
it has the correct version
"""
LOGGER.info(f'Test that language {language} is correctly installed ...')
client = docker.from_env()
output = client.containers.run('illumidesk/base-notebook:latest', f'{language} --version')
output_decoded = output.decode('utf-8').split(' ')
assert output_decoded[0:3] == version_output
LOGGER.info(f'Output from command: {output_decoded[0:3]}')
def test_invalid_cmd():
"""Ensure that an invalid command returns a docker.errors.ContainerError
"""
with pytest.raises(ContainerError):
LOGGER.info('Test an invalid command ...')
client = docker.from_env()
client.containers.run('illumidesk/base-notebook', 'foo --version')
|
the-stack_0_22487 | cardinal = {"E":(1,0), "S":(0,1), "W":(-1,0), "N":(0,-1)}
rot = {('R',90): 1, ('R',180): 2, ('R',270): 3,
('L',90): 3, ('L',180): 2, ('L',270): 1}
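# rot maps each turn instruction to the equivalent number of 90-degree
# clockwise rotations (an 'L' turn is three clockwise quarter turns).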
def distance(instructions, wx, wy, part2):
x, y = 0, 0
for (i, n) in instructions:
if i == 'L' or i == 'R':
for c in range(rot[i,n]):
wx, wy = -wy, wx
elif i == 'F':
x += wx * n
y += wy * n
elif part2:
wx += cardinal[i][0] * n
wy += cardinal[i][1] * n
else:
x += cardinal[i][0] * n
y += cardinal[i][1] * n
return abs(x) + abs(y)
with open("day12.txt", "r") as fh:
instructions = [(line[0], int(line[1:])) for line in fh.readlines()]
print("2020 day 12 part 1: %d" % distance(instructions, 1, 0, False))
print("2020 day 12 part 2: %d" % distance(instructions, 10, -1, True))
|
the-stack_0_22488 | from PyQt5 import QtCore, QtGui, QtWidgets
import sys
# This class implements a widget consisting of two lists. The first one contains a selection of
# available items, the right one a (sorted) list of chosen items. Items may be moved between the lists,
# as well as sorted within the list of chosen items.
# The widget manages the selection model for the list views and enables/disables buttons accordingly.
# The actual logic is in the controller and BrowserState modules.
class FieldChoiceWidget(QtWidgets.QWidget):
############# Signals #############
add_button_clicked = QtCore.pyqtSignal()
remove_button_clicked = QtCore.pyqtSignal()
up_button_clicked = QtCore.pyqtSignal()
down_button_clicked = QtCore.pyqtSignal()
############# Public interface #############
def __init__(self):
super().__init__()
self.setSizePolicy (QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding)
# FIXME TODO debug only
# p = self.palette()
# p.setColor(self.backgroundRole(), QtCore.Qt.red)
# self.setPalette(p)
# self.setAutoFillBackground(True)
# make subwidgets
self._invisible_fields_display = QtWidgets.QListView()
self._invisible_fields_display.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self._invisible_fields_display.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding)
self._invisible_fields_selection_model = self._invisible_fields_display.selectionModel()
assert self._invisible_fields_selection_model is None
self._visible_fields_display = QtWidgets.QListView()
self._visible_fields_display.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self._visible_fields_display.setSizePolicy(QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Expanding)
self._visible_fields_selection_model = self._visible_fields_display.selectionModel()
assert self._visible_fields_selection_model is None
self._add_button = QtWidgets.QPushButton('+')
self._add_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,QtWidgets.QSizePolicy.Expanding)
self._remove_button = QtWidgets.QPushButton('-')
self._remove_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,QtWidgets.QSizePolicy.Expanding)
self._up_button = QtWidgets.QPushButton('UP')
self._up_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,QtWidgets.QSizePolicy.Expanding)
self._down_button = QtWidgets.QPushButton('DOWN')
self._down_button.setSizePolicy(QtWidgets.QSizePolicy.Preferred,QtWidgets.QSizePolicy.Expanding)
self._update_button_status()
# connect
self._add_button.clicked.connect(self._slot_add_button_clicked)
self._remove_button.clicked.connect(self._slot_remove_button_clicked)
self._up_button.clicked.connect(self._slot_up_button_clicked)
self._down_button.clicked.connect(self._slot_down_button_clicked)
# note that connections regarding the selection model are made when models are set (i.e. in set_models)
# make layout
self._layout = QtWidgets.QGridLayout()
self._layout.addWidget(self._invisible_fields_display,0,0,2,1)
self._layout.addWidget(self._add_button,0,1,1,1)
self._layout.addWidget(self._remove_button,1,1,1,1)
self._layout.addWidget(self._visible_fields_display,0,2,2,1)
self._layout.addWidget(self._up_button,0,3,1,1)
self._layout.addWidget(self._down_button,1,3,1,1)
self.setLayout(self._layout)
def set_models(self,invisible_fields_model,visible_fields_model):
self._invisible_fields_display.setModel(invisible_fields_model)
self._visible_fields_display.setModel(visible_fields_model)
# this has automatically created selection models, TODO possible delete old selection model
self._invisible_fields_selection_model = self._invisible_fields_display.selectionModel()
self._visible_fields_selection_model = self._visible_fields_display.selectionModel()
self._invisible_fields_selection_model.selectionChanged.connect(self._on_new_invisible_selection)
self._visible_fields_selection_model.selectionChanged.connect(self._on_new_visible_selection)
# Get the currently selected row, or None if there is no selection
def get_invisible_fields_selected_row(self):
if self._invisible_fields_selection_model is None:
return None
sel_rows = self._invisible_fields_selection_model.selectedRows()
assert len(sel_rows) <= 1
if len(sel_rows) == 0:
return None
else:
return sel_rows[0].row()
def get_visible_fields_selected_row(self):
if self._visible_fields_selection_model is None:
return None
sel_rows = self._visible_fields_selection_model.selectedRows()
assert len(sel_rows) <= 1
if len(sel_rows) == 0:
return None
else:
return sel_rows[0].row()
############# Internal Implementation #############
def _on_new_invisible_selection(self):
print('Field choice: new invisible selection')
self._update_button_status()
def _on_new_visible_selection(self):
print('Field choice: new visible selection')
self._update_button_status()
def _slot_add_button_clicked(self):
self.add_button_clicked.emit()
# required since the selection has moved
self._update_button_status()
def _slot_remove_button_clicked(self):
self.remove_button_clicked.emit()
# required since the selection has moved
self._update_button_status()
def _slot_up_button_clicked(self):
self.up_button_clicked.emit()
# required since the selection has moved
self._update_button_status()
def _slot_down_button_clicked(self):
self.down_button_clicked.emit()
# required since the selection has moved
self._update_button_status()
def _update_button_status(self):
# disable everything if any model is missing
if (
self._invisible_fields_display.model() is None or self._invisible_fields_selection_model is None or
self._visible_fields_display.model() is None or self._visible_fields_selection_model is None ):
self._add_button.setEnabled(False)
self._remove_button.setEnabled(False)
self._up_button.setEnabled(False)
self._down_button.setEnabled(False)
return
# check in detail
inv_selected_row = self.get_invisible_fields_selected_row()
inv_row_count = self._invisible_fields_display.model().rowCount(QtCore.QModelIndex())
vis_selected_row = self.get_visible_fields_selected_row()
vis_row_count = self._visible_fields_display.model().rowCount(QtCore.QModelIndex())
self._add_button.setEnabled(inv_selected_row is not None)
self._remove_button.setEnabled(vis_selected_row is not None)
self._up_button.setEnabled(vis_selected_row is not None and vis_selected_row > 0)
self._down_button.setEnabled(vis_selected_row is not None and vis_selected_row < vis_row_count-1)
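# Hypothetical usage sketch (not part of the original module): wires the widget
# to two simple QStringListModel instances so the buttons become active. The
# field names used here are made up; the real models come from the controller
# and BrowserState modules mentioned above.
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    widget = FieldChoiceWidget()
    available = QtCore.QStringListModel(['author', 'year', 'journal'])
    chosen = QtCore.QStringListModel(['title'])
    widget.set_models(available, chosen)
    widget.show()
    sys.exit(app.exec_())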
|
the-stack_0_22490 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openjpeg(CMakePackage):
"""OpenJPEG is an open-source JPEG 2000 codec written in C language.
It has been developed in order to promote the use of JPEG 2000, a
still-image compression standard from the Joint Photographic
Experts Group (JPEG).
Since April 2015, it is officially recognized by ISO/IEC and
ITU-T as a JPEG 2000 Reference Software.
"""
homepage = 'https://github.com/uclouvain/openjpeg'
url = 'https://github.com/uclouvain/openjpeg/archive/v2.3.1.tar.gz'
version('2.4.0', sha256='8702ba68b442657f11aaeb2b338443ca8d5fb95b0d845757968a7be31ef7f16d')
version('2.3.1', sha256='63f5a4713ecafc86de51bfad89cc07bb788e9bba24ebbf0c4ca637621aadb6a9')
version('2.3.0', sha256='3dc787c1bb6023ba846c2a0d9b1f6e179f1cd255172bde9eb75b01f1e6c7d71a')
version('2.2.0', sha256='6fddbce5a618e910e03ad00d66e7fcd09cc6ee307ce69932666d54c73b7c6e7b')
version('2.1.2', sha256='4ce77b6ef538ef090d9bde1d5eeff8b3069ab56c4906f083475517c2c023dfa7')
version('2.1.1', sha256='82c27f47fc7219e2ed5537ac69545bf15ed8c6ba8e6e1e529f89f7356506dbaa')
version('2.1.0', sha256='4afc996cd5e0d16360d71c58216950bcb4ce29a3272360eb29cadb1c8bce4efc')
version('2.0.1', sha256='f184d402a218359184fd162075bb5246a68165b9776678185b6a379c49093816')
version('2.0.0', sha256='5480f801a9f88af1a456145e41f3adede1dfae425bbac66a19c7eeeba94a1249')
version('1.5.2', sha256='3734e95edd0bef6e056815591755efd822228dc3cd866894e00a2c929026b16d')
version('1.5.1', sha256='6a42fcc23cb179f69a1e94429089e5a5926aee1ffe582a0a6bd91299d297e61a')
variant('codec', default=False, description='Build the CODEC executables')
depends_on('zlib', when='+codec')
depends_on('libpng', when='+codec')
depends_on('libtiff', when='+codec')
depends_on('lcms', when='+codec')
# The problem with install name of the library on MacOs was fixed starting
# version 2.1.1: https://github.com/uclouvain/openjpeg/commit/b9a247b559e62e55f5561624cf4a19aee3c8afdc
# The solution works for the older versions (at least starting 1.5.1) too.
patch('macos.patch', when='@:2.1.0 platform=darwin')
def url_for_version(self, version):
if version >= Version('2.1.1'):
return super(Openjpeg, self).url_for_version(version)
# Before version 2.2.0, release tarballs of the versions like x.y.0
# did not have the ".0" in their names:
if version[2] == 0:
version = version.up_to(2)
url_fmt = \
'https://github.com/uclouvain/openjpeg/archive/version.{0}.tar.gz'
return url_fmt.format(version)
@property
def libs(self):
return find_libraries('libopenjp{0}'.format(self.version.up_to(1)),
root=self.prefix, recursive=True)
def cmake_args(self):
args = [
self.define_from_variant('BUILD_CODEC', 'codec'),
# MJ2 executables are disabled by default and we just make it
# explicit. Note that the executables require additional libraries
# as in the case '+codec', therefore, we will need to update the
# 'depends_on' directives when/if we introduce a variant that
# enables them.
self.define('BUILD_MJ2', False),
# Note that if the list of dependencies is incomplete, there is
# still a chance that the bundled third-party libraries get built.
self.define('BUILD_THIRDPARTY', False)
]
return args
|
the-stack_0_22491 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import shutil
import argparse
import subprocess
import numpy as np
import contextlib
import onnx
from cvi_toolkit.utils.mlir_shell import *
from cvi_toolkit.utils.intermediate_file import IntermediateFile
@contextlib.contextmanager
def pushd(new_dir):
previous_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(previous_dir)
class ModelTest(object):
def __init__(self, chip, model_path, batch_size):
self.chip = chip
self.model_path = model_path
self.batch_size = batch_size
self.model_name = os.path.split(model_path)[-1].split(".")[0]
self.fp32_mlir = self.model_name + ".mlir"
self.cvimodel = self.model_name + ".cvimodel"
self.input_path = "./input.npz"
def __make_test_calibration_table__(self, table_name):
blobs_interp_npz = IntermediateFile(self.model_name, 'full_precision_interp.npz', False)
ret = mlir_inference(self.fp32_mlir, self.input_path, None, str(blobs_interp_npz))
if ret != 0:
raise RuntimeError("{} mlir inference failed".format(self.model_path))
tensors = np.load(str(blobs_interp_npz))
with open(table_name, "w") as f:
for name in tensors:
threshold = np.abs(np.max(tensors[name]))
if np.isnan(threshold):
threshold = 10.0
elif threshold >= 127.0:
threshold = 127.0
elif threshold <= 0.001:
threshold = 1.0
else:
pass
f.write("{} {}\n".format(name, threshold))
def run(self, quant_mode, input=None):
if self.model_path.endswith(".onnx"):
onnx_model = onnx.load(self.model_path)
input_nodes = onnx_model.graph.input
self.__gen_onnx_input__(input_nodes)
transform_cmd = [
'model_transform.py', '--model_type', 'onnx', '--model_name', self.model_name, '--model_def', self.model_path,
'--image', self.input_path, '--net_input_dims', '1,100', '--tolerance', '0.99,0.99,0.99', '--mlir',
self.fp32_mlir
]
subprocess.run(transform_cmd)
elif self.model_path.endswith(".mlir"):
tmp_mlir_file = IntermediateFile(self.model_name, 'fp32.mlir.tmp', False)
op_info_csv = IntermediateFile(self.model_name, 'op_info.csv', True)
ret = mlir_pseudo_weight(self.model_path, str(tmp_mlir_file))
ret = mlir_opt(str(tmp_mlir_file), self.fp32_mlir, str(op_info_csv))
if ret != 0:
raise RuntimeError("{} opt failed".format(self.model_path))
if quant_mode in ['bf16', 'mix_bf16']:
deploy_cmd = [
'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--quantize',
quant_mode.upper(), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
'--outputs_type', 'SAME', '--tolerance', '0.99,0.99,0.87', '--correctness', '0.99,0.99,0.95', '--debug',
'--cvimodel', self.cvimodel
]
elif "int8" == quant_mode:
# simple cali and convert to cvimodel
table_file = IntermediateFile(self.model_name, 'calibration_table', True)
self.__make_test_calibration_table__(str(table_file))
deploy_cmd = [
'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--calibration_table',
str(table_file), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
'--outputs_type', 'SAME', '--tolerance', '0.10,0.10,0.1', '--correctness', '0.99,0.99,0.93', '--debug',
'--cvimodel', self.cvimodel
]
else:
raise ValueError("Now just support bf16/int8")
subprocess.run(deploy_cmd)
def __gen_onnx_input__(self, input_nodes):
self.input_data = {}
for input in input_nodes:
input_shape = []
for i, dim in enumerate(input.type.tensor_type.shape.dim):
if i == 0 and dim.dim_value <= 0 and self.batch_size != 0:
input_shape.append(self.batch_size)
else:
input_shape.append(dim.dim_value)
if 1 == input.type.tensor_type.elem_type: # 1 for np.float32
self.input_data[input.name] = np.random.randn(*input_shape).astype(np.float32)
# self.input_data[input.name] = np.random.uniform(1, 6, input_shape).astype(np.float32)
elif 7 == input.type.tensor_type.elem_type: # 7 for np.int64 / torch.long
self.input_data[input.name] = np.random.randint(0, 3, input_shape).astype(np.int64)
elif 9 == input.type.tensor_type.elem_type: # 9 for boolean
self.input_data[input.name] = np.random.randint(0, 2, input_shape).astype(np.float32)
else:
raise ValueError("Not support now, add here")
np.savez("input.npz", **self.input_data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model", help="model definition file, mlir or onnx")
parser.add_argument("--quantize", choices=['bf16', 'int8', 'mix_bf16'], default="bf16", help="quant mode")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--chip", type=str, default="cv182x", help="chip type")
parser.add_argument("--out_dir", type=str, default="tmp", help="out folder")
# parser.add_argument("--excepts", default='-', help="excepts")
# parser.add_argument("--graph", action='store_true', help="generate graph to pb file")
args = parser.parse_args()
if os.path.exists(args.out_dir):
shutil.rmtree(args.out_dir)
os.makedirs(args.out_dir)
tmp_model_file = os.path.split(args.model)[-1]
shutil.copy(args.model, os.path.join(args.out_dir, tmp_model_file))
with pushd(args.out_dir):
tool = ModelTest(args.chip, tmp_model_file, args.batch_size)
tool.run(args.quantize)
|
the-stack_0_22492 | # -*- coding: utf-8 -*-
"""
Custom scripts to visualize results/plot automatically elsewhere in the repository.
Import this file as module in the desired exploratory notebook using the following code:
# import local package modules
# Ref: https://github.com/manifoldai/docker-cookiecutter-data-science/issues/4
import sys
sys.path.append("..")
from src.visualization import visualize as viz
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from scipy.interpolate import griddata
def make_surface(x, y, z, **kwargs):
# interpret kwargs
fig_title = kwargs.pop('fig_title')
x_title = kwargs.pop('x_title')
y_title = kwargs.pop('y_title')
z_title = kwargs.pop('z_title')
# make plot coords
xi = np.linspace(min(x), max(x), num=100)
yi = np.linspace(min(y), max(y), num=100)
x_grid, y_grid = np.meshgrid(xi,yi)
#Grid data
z_grid = griddata((x,y),z,(x_grid,y_grid),method='cubic')
# Plotly 3D Surface
fig = go.Figure(go.Surface(x=x_grid,y=y_grid,z=z_grid,
colorscale='viridis'))
fig.update_layout(scene = dict(
xaxis_title=x_title,
yaxis_title=y_title,
zaxis_title=z_title), title = fig_title,
width=700,
margin=dict(r=20, b=10, l=10, t=10))
fig.show()
return
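# Hypothetical example call (column names are made up; any three equal-length
# 1-D coordinate arrays work):
# make_surface(df['x'], df['y'], df['z'], fig_title='Response surface',
#              x_title='x', y_title='y', z_title='z')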
def make_3D_scatter(X, Y, Z, **kwargs):
'''Create a 3D scatter plot based on individual datapoints.'''
# fill results that are TBD with zero for now
Z = np.asarray([num if str(num) != "nan" else 0 for num in Z])
ax = plt.axes(projection='3d')
xlabel = kwargs.pop('xlabel')
ylabel = kwargs.pop('ylabel')
zlabel = kwargs.pop('zlabel')
ax.scatter(X, Y, Z, c=Z, cmap = 'viridis',linewidth=0.5);
plt.xlabel(xlabel, labelpad=10)
plt.ylabel(ylabel, labelpad=10)
ax.zaxis.set_label_text(zlabel)
ax.ticklabel_format(axis='both', style='plain')
plt.show()
return
def make_3D_surface(X, Y, Z, **kwargs):
'''Makes a triangulated 3D surface plot based on individual datapoints.'''
# fill any results that are TBD with zero for now
Z = np.asarray([num if str(num) != "nan" else 0 for num in Z])
ax = plt.axes(projection='3d')
xlabel = kwargs.pop('xlabel')
ylabel = kwargs.pop('ylabel')
zlabel = kwargs.pop('zlabel')
ax.plot_trisurf(X, Y, Z, edgecolor='none');
plt.xlabel(xlabel, labelpad=10)
plt.ylabel(ylabel, labelpad=10)
ax.zaxis.set_label_text(zlabel)
ax.ticklabel_format(axis='both', style='plain')
plt.show() |
the-stack_0_22493 | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
"""
The module contains the :class:`Sequence` superclass and :class:`GeneralSequence`.
"""
__name__ = "biotite.sequence"
__author__ = "Patrick Kunzmann"
__all__ = ["Sequence"]
import numbers
import abc
import numpy as np
from .alphabet import Alphabet, LetterAlphabet
from ..copyable import Copyable
_size_uint8 = np.iinfo(np.uint8 ).max +1
_size_uint16 = np.iinfo(np.uint16).max +1
_size_uint32 = np.iinfo(np.uint32).max +1
class Sequence(Copyable, metaclass=abc.ABCMeta):
"""
The abstract base class for all sequence types.
A :class:`Sequence` can be seen as a succession of symbols, that are
elements in the allowed set of symbols, the :class:`Alphabet`.
Internally, a :class:`Sequence` object uses a *NumPy*
:class:`ndarray` of integers, where each integer represents a
symbol.
The :class:`Alphabet` of a :class:`Sequence` object is used to
encode each symbol, that is used to create the
:class:`Sequence`, into an integer. These integer values are called
symbol code, the encoding of an entire sequence of symbols is
called sequence code.
The size of the symbol code type in the array is determined by the
size of the :class:`Alphabet`:
If the :class:`Alphabet` contains 256 symbols or less, one byte is
used per array element; if the :class:`Alphabet` contains
between 257 and 65536 symbols, two bytes are used, and so on.
Two :class:`Sequence` objects are equal if they are instances of the
same class, have the same :class:`Alphabet` and have equal sequence
codes.
Comparison with a string or list of symbols evaluates always to
false.
A :class:`Sequence` can be indexed by any 1-D index a
:class:`ndarray` accepts.
If the index is a single integer, the decoded symbol at that
position is returned, otherwise a subsequence is returned.
Individual symbols of the sequence can also be exchanged in indexed
    form: If an integer is used as index, the item is treated as a
symbol. Any other index (slice, index list, boolean mask) expects
multiple symbols, either as list of symbols, as :class:`ndarray`
containing a sequence code or another :class:`Sequence` instance.
Concatenation of two sequences is achieved with the '+' operator.
Each subclass of :class:`Sequence` needs to overwrite the abstract
method :func:`get_alphabet()`, which specifies the alphabet the
:class:`Sequence` uses.
Parameters
----------
sequence : iterable object, optional
The symbol sequence, the :class:`Sequence` is initialized with.
For alphabets containing single letter strings, this parameter
may also be a :class`str` object.
By default the sequence is empty.
Attributes
----------
code : ndarray
The sequence code.
symbols : list
The list of symbols, represented by the sequence.
The list is generated by decoding the sequence code, when
this attribute is accessed. When this attribute is modified,
the new list of symbols is encoded into the sequence code.
alphabet : Alphabet
The alphabet of this sequence. Cannot be set.
Equal to `get_alphabet()`.
Examples
--------
Creating a DNA sequence from string and print the symbols and the
code:
>>> dna_seq = NucleotideSequence("ACGTA")
>>> print(dna_seq)
ACGTA
>>> print(dna_seq.code)
[0 1 2 3 0]
>>> print(dna_seq.symbols)
['A' 'C' 'G' 'T' 'A']
>>> print(list(dna_seq))
['A', 'C', 'G', 'T', 'A']
Sequence indexing:
>>> print(dna_seq[1:3])
CG
>>> print(dna_seq[[0,2,4]])
AGA
>>> print(dna_seq[np.array([False,False,True,True,True])])
GTA
Sequence manipulation:
>>> dna_copy = dna_seq.copy()
>>> dna_copy[2] = "C"
>>> print(dna_copy)
ACCTA
>>> dna_copy = dna_seq.copy()
>>> dna_copy[0:2] = dna_copy[3:5]
>>> print(dna_copy)
TAGTA
>>> dna_copy = dna_seq.copy()
>>> dna_copy[np.array([True,False,False,False,True])] = "T"
>>> print(dna_copy)
TCGTT
>>> dna_copy = dna_seq.copy()
>>> dna_copy[1:4] = np.array([0,1,2])
>>> print(dna_copy)
AACGA
Reverse sequence:
>>> dna_seq_rev = dna_seq.reverse()
>>> print(dna_seq_rev)
ATGCA
Concatenate the two sequences:
>>> dna_seq_concat = dna_seq + dna_seq_rev
>>> print(dna_seq_concat)
ACGTAATGCA
"""
def __init__(self, sequence=()):
self.symbols = sequence
def copy(self, new_seq_code=None):
"""
Copy the object.
Parameters
----------
new_seq_code : ndarray, optional
If this parameter is set, the sequence code is set to this
value, rather than the original sequence code.
Returns
-------
copy
A copy of this object.
"""
# Override in order to achieve better performance,
# in case only a subsequence is needed,
# because not the entire sequence code is copied then
clone = self.__copy_create__()
if new_seq_code is None:
clone.code = np.copy(self.code)
else:
clone.code = new_seq_code
self.__copy_fill__(clone)
return clone
@property
def symbols(self):
return self.get_alphabet().decode_multiple(self.code)
@symbols.setter
def symbols(self, value):
alph = self.get_alphabet()
dtype = Sequence._dtype(len(alph))
self._seq_code = alph.encode_multiple(value, dtype)
@property
def code(self):
return self._seq_code
@code.setter
def code(self, value):
dtype = Sequence._dtype(len(self.get_alphabet()))
if not isinstance(value, np.ndarray):
raise TypeError("Sequence code must be an integer ndarray")
self._seq_code = value.astype(dtype, copy=False)
@property
def alphabet(self):
return self.get_alphabet()
@abc.abstractmethod
def get_alphabet(self):
"""
Get the :class:`Alphabet` of the :class:`Sequence`.
This method must be overwritten, when subclassing
:class:`Sequence`.
Returns
-------
alphabet : Alphabet
:class:`Sequence` alphabet.
"""
pass
def reverse(self):
"""
Reverse the :class:`Sequence`.
Returns
-------
reversed : Sequence
The reversed :class:`Sequence`.
Examples
--------
>>> dna_seq = NucleotideSequence("ACGTA")
>>> dna_seq_rev = dna_seq.reverse()
>>> print(dna_seq_rev)
ATGCA
"""
reversed_code = np.flip(np.copy(self._seq_code), axis=0)
reversed = self.copy(reversed_code)
return reversed
def is_valid(self):
"""
Check, if the sequence contains a valid sequence code.
A sequence code is valid, if at each sequence position the
code is smaller than the size of the alphabet.
Invalid code means that the code cannot be decoded into
symbols. Furthermore invalid code can lead to serious
errors in alignments, since the substitution matrix
is indexed with an invalid index.
Returns
-------
valid : bool
True, if the sequence is valid, false otherwise.
"""
return (self.code < len(self.get_alphabet())).all()
def get_symbol_frequency(self):
"""
        Get the number of occurrences of each symbol in the sequence.
        If a symbol does not occur in the sequence, but it is in the
        alphabet, its number of occurrences is 0.
Returns
-------
frequency : dict
A dictionary containing the symbols as keys and the
            corresponding number of occurrences in the sequence as
values.
"""
frequencies = {}
for code, symbol in enumerate(self.get_alphabet()):
frequencies[symbol] = len(np.nonzero((self._seq_code == code))[0])
return frequencies
def __getitem__(self, index):
alph = self.get_alphabet()
sub_seq = self._seq_code.__getitem__(index)
if isinstance(sub_seq, np.ndarray):
return self.copy(sub_seq)
else:
return alph.decode(sub_seq)
def __setitem__(self, index, item):
alph = self.get_alphabet()
if isinstance(index, numbers.Integral):
# Expect a single symbol
code = alph.encode(item)
else:
# Expect multiple symbols
if isinstance(item, Sequence):
code = item.code
elif isinstance(item, np.ndarray):
code = item
else:
# Default: item is iterable object of symbols
code = alph.encode_multiple(item)
self._seq_code.__setitem__(index, code)
def __len__(self):
return len(self._seq_code)
def __iter__(self):
alph = self.get_alphabet()
i = 0
while i < len(self):
yield alph.decode(self._seq_code[i])
i += 1
def __eq__(self, item):
if not isinstance(item, type(self)):
return False
if self.get_alphabet() != item.get_alphabet():
return False
return np.array_equal(self._seq_code, item._seq_code)
def __str__(self):
alph = self.get_alphabet()
if isinstance(alph, LetterAlphabet):
return alph.decode_multiple(self._seq_code, as_bytes=True)\
.tobytes().decode("ASCII")
else:
return "".join(alph.decode_multiple(self._seq_code))
def __add__(self, sequence):
if self.get_alphabet().extends(sequence.get_alphabet()):
new_code = np.concatenate((self._seq_code, sequence._seq_code))
new_seq = self.copy(new_code)
return new_seq
elif sequence.get_alphabet().extends(self.get_alphabet()):
new_code = np.concatenate((self._seq_code, sequence._seq_code))
new_seq = sequence.copy(new_code)
return new_seq
else:
raise ValueError("The sequences alphabets are not compatible")
@staticmethod
def _dtype(alphabet_size):
if alphabet_size <= _size_uint8:
return np.uint8
elif alphabet_size <= _size_uint16:
return np.uint16
elif alphabet_size <= _size_uint32:
return np.uint32
else:
return np.uint64
|
the-stack_0_22494 | from . import main
from flask import render_template, url_for, abort, request, redirect
from flask_login import login_required, current_user
from ..models import User, Pitch, Upvote, Downvote, Comment
from .forms import UpdateForm, PitchForm, CommentForm
from .. import db, photos
@main.route('/')
def index():
"""main view function"""
all_pitches = Pitch.query.all()
title = "Flask Pitch Application"
return render_template("index.html", all_pitches=all_pitches, title=title)
@main.route('/profile/<my_name>')
@login_required
def profile(my_name):
title = "Flask Profile"
user = User.query.filter_by(username=my_name).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user=user, title=title)
@main.route('/update/<my_name>', methods=['GET', 'POST'])
@login_required
def edit_profile(my_name):
title = "Edit profile"
user = User.query.filter_by(username=my_name).first()
if user is None:
abort(404)
update_form = UpdateForm()
if update_form.validate_on_submit():
user.biography = update_form.biography.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile', my_name=user.username))
return render_template("profile/update.html", update_form=update_form, title=title)
@main.route('/updateImage/<my_name>', methods=['POST'])
@login_required
def update_image(my_name):
title = "Update image"
user = User.query.filter_by(username=my_name).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic = path
db.session.commit()
return redirect(url_for('main.profile', my_name=my_name, title=title))
@main.route('/new-pitch/<id>', methods=['GET', 'POST'])
@login_required
def pitch(id):
"""New Pitch function"""
title = "New Pitch"
pitch_form = PitchForm()
if pitch_form.validate_on_submit():
title = pitch_form.title.data
category = pitch_form.category.data
pitch = pitch_form.pitch.data
new_pitch = Pitch(title=title, category=category, pitch=pitch, user=current_user)
new_pitch.save_pitch()
return redirect(url_for('.index'))
return render_template('pitch.html', pitch_form=pitch_form, title=title)
@main.route('/upvote/<id>', methods=['GET', 'POST'])
@login_required
def upVote(id):
votes = Upvote.get_upvotes(id)
output = f'{current_user.id}:{id}'
for vote in votes:
result = f'{vote}'
if output == result:
return redirect(url_for('main.index', id=id))
else:
continue
new_upvote = Upvote(user=current_user, pitch_id=id)
new_upvote.save()
return redirect(url_for('main.index', id=id))
@main.route('/downvote/<id>', methods=['GET', 'POST'])
@login_required
def downVote(id):
votes = Downvote.get_downvotes(id)
output = f'{current_user.id}:{id}'
for vote in votes:
result = f'{vote}'
if output == result:
return redirect(url_for('main.index', id=id))
else:
continue
new_downvote = Downvote(user=current_user, pitch_id=id)
new_downvote.save()
return redirect(url_for('main.index', id=id))
@main.route('/comment/<id>', methods=['GET', 'POST'])
@login_required
def comment(id):
comment_form = CommentForm()
pitch = Pitch.query.get(id)
fetch_all_comments = Comment.query.filter_by(pitch_id=id).all()
if comment_form.validate_on_submit():
comment = comment_form.comment.data
pitch_id = id
user_id = current_user._get_current_object().id
new_comment = Comment(comment=comment, user_id=user_id, pitch_id=pitch_id)
new_comment.save_comment()
return redirect(url_for('.comment', id=pitch_id))
return render_template('comments.html', comment_form=comment_form, pitch=pitch, all_comments=fetch_all_comments)
#######################################Categories##################################
# pickup lines, interview pitch, product pitch, promotion pitch.
@main.route('/pickup')
def pickup():
# pitches = Pitch.query.all()
title="Pickup Line Pitch"
pickup = Pitch.query.filter_by(category='pickup').all()
return render_template('category/pickup.html', all_pitches=pickup,title=title)
@main.route('/interview')
def interview():
# pitches = Pitch.query.all()
title="Interview Pitch"
interview = Pitch.query.filter_by(category='interview').all()
return render_template('category/interview.html', all_pitches=interview,title=title)
@main.route('/product')
def product():
# pitches = Pitch.query.all()
title="Product Pitch"
product = Pitch.query.filter_by(category='product').all()
return render_template('category/product.html', all_pitches=product,title=title)
@main.route('/promotion')
def promotion():
# pitches = Pitch.query.all()
title="Promotion Pitch"
promotion = Pitch.query.filter_by(category='promotion').all()
print(promotion)
return render_template('category/promotion.html', all_pitches=promotion,title=title) |
the-stack_0_22495 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 Dmitriy Yefremov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Dmitriy Yefremov
#
""" Module for IPTV and streams support """
import re
from enum import Enum
from urllib.parse import unquote, quote
from app.commons import log
from app.eparser.ecommons import BqServiceType, Service
from app.settings import SettingsType
from app.ui.uicommons import IPTV_ICON
# url, description, urlkey, account, usrname, psw, s_type, iconsrc, iconsrc_b, group
NEUTRINO_FAV_ID_FORMAT = "{}::{}::{}::{}::{}::{}::{}::{}::{}::{}"
ENIGMA2_FAV_ID_FORMAT = " {}:{}:{}:{:X}:{:X}:{:X}:{:X}:0:0:0:{}:{}\n#DESCRIPTION: {}\n"
MARKER_FORMAT = " 1:64:{}:0:0:0:0:0:0:0::{}\n#DESCRIPTION {}\n"
class StreamType(Enum):
DVB_TS = "1"
NONE_TS = "4097"
NONE_REC_1 = "5001"
NONE_REC_2 = "5002"
E_SERVICE_URI = "8193"
E_SERVICE_HLS = "8739"
def parse_m3u(path, s_type, detect_encoding=True, params=None):
with open(path, "rb") as file:
data = file.read()
encoding = "utf-8"
if detect_encoding:
try:
import chardet
except ModuleNotFoundError:
pass
else:
enc = chardet.detect(data)
encoding = enc.get("encoding", "utf-8")
aggr = [None] * 10
s_aggr = aggr[: -3]
services = []
groups = set()
marker_counter = 1
sid_counter = 1
name = None
picon = None
p_id = "1_0_1_0_0_0_0_0_0_0.png"
st = BqServiceType.IPTV.name
params = params or [0, 0, 0, 0]
for line in str(data, encoding=encoding, errors="ignore").splitlines():
if line.startswith("#EXTINF"):
line, sep, name = line.rpartition(",")
data = re.split('"', line)
size = len(data)
if size < 3:
continue
d = {data[i].lower().strip(" ="): data[i + 1] for i in range(0, len(data) - 1, 2)}
picon = d.get("tvg-logo", None)
grp_name = d.get("group-title", None)
if grp_name not in groups:
groups.add(grp_name)
fav_id = MARKER_FORMAT.format(marker_counter, grp_name, grp_name)
marker_counter += 1
mr = Service(None, None, None, grp_name, *aggr[0:3], BqServiceType.MARKER.name, *aggr, fav_id, None)
services.append(mr)
elif line.startswith("#EXTGRP") and s_type is SettingsType.ENIGMA_2:
grp_name = line.strip("#EXTGRP:").strip()
if grp_name not in groups:
groups.add(grp_name)
fav_id = MARKER_FORMAT.format(marker_counter, grp_name, grp_name)
marker_counter += 1
mr = Service(None, None, None, grp_name, *aggr[0:3], BqServiceType.MARKER.name, *aggr, fav_id, None)
services.append(mr)
elif not line.startswith("#"):
url = line.strip()
params[0] = sid_counter
sid_counter += 1
fav_id = get_fav_id(url, name, s_type, params)
if all((name, url, fav_id)):
srv = Service(None, None, IPTV_ICON, name, *aggr[0:3], st, picon, p_id, *s_aggr, url, fav_id, None)
services.append(srv)
else:
log(f"*.m3u* parse error ['{path}']: name[{name}], url[{url}], fav id[{fav_id}]")
return services
def export_to_m3u(path, bouquet, s_type, url=None):
pattern = re.compile(".*:(http.*):.*") if s_type is SettingsType.ENIGMA_2 else re.compile("(http.*?)::::.*")
lines = ["#EXTM3U\n"]
current_grp = None
for s in bouquet.services:
s_type = s.type
if s_type is BqServiceType.IPTV:
res = re.match(pattern, s.data)
if not res:
continue
lines.append(f"#EXTINF:-1,{s.name}\n")
lines.append(current_grp) if current_grp else None
lines.append(f"{unquote(res.group(1).strip())}\n")
elif s_type is BqServiceType.MARKER:
current_grp = f"#EXTGRP:{s.name}\n"
elif s_type is BqServiceType.DEFAULT and url:
lines.append(f"#EXTINF:-1,{s.name}\n")
lines.append(current_grp) if current_grp else None
lines.append(f"{url}{s.data}\n")
with open(f"{path}{bouquet.name}.m3u", "w", encoding="utf-8") as file:
file.writelines(lines)
def get_fav_id(url, name, settings_type, params=None, st_type=None, s_id=0, srv_type=1):
""" Returns fav id depending on the profile. """
if settings_type is SettingsType.ENIGMA_2:
st_type = st_type or StreamType.NONE_TS.value
params = params or (0, 0, 0, 0)
return ENIGMA2_FAV_ID_FORMAT.format(st_type, s_id, srv_type, *params, quote(url), name, name, None)
elif settings_type is SettingsType.NEUTRINO_MP:
return NEUTRINO_FAV_ID_FORMAT.format(url, "", 0, None, None, None, None, "", "", 1)
if __name__ == "__main__":
pass
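    # Hypothetical example (assumes a local "playlist.m3u" next to this file):
    # services = parse_m3u("playlist.m3u", SettingsType.ENIGMA_2)
    # print(len(services), "services parsed")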
|
the-stack_0_22496 | from django import forms
from django.conf import settings
from django.utils import timezone
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from wagtail.admin import widgets
from wagtail.core.models import Page, PageViewRestriction
from .models import WagtailAdminModelForm
from .view_restrictions import BaseViewRestrictionForm
class CopyForm(forms.Form):
def __init__(self, *args, **kwargs):
# CopyPage must be passed a 'page' kwarg indicating the page to be copied
self.page = kwargs.pop('page')
self.user = kwargs.pop('user', None)
can_publish = kwargs.pop('can_publish')
super().__init__(*args, **kwargs)
self.fields['new_title'] = forms.CharField(initial=self.page.title, label=_("New title"))
allow_unicode = getattr(settings, 'WAGTAIL_ALLOW_UNICODE_SLUGS', True)
self.fields['new_slug'] = forms.SlugField(initial=self.page.slug, label=_("New slug"), allow_unicode=allow_unicode)
self.fields['new_parent_page'] = forms.ModelChoiceField(
initial=self.page.get_parent(),
queryset=Page.objects.all(),
widget=widgets.AdminPageChooser(can_choose_root=True, user_perms='copy_to'),
label=_("New parent page"),
help_text=_("This copy will be a child of this given parent page.")
)
pages_to_copy = self.page.get_descendants(inclusive=True)
subpage_count = pages_to_copy.count() - 1
if subpage_count > 0:
self.fields['copy_subpages'] = forms.BooleanField(
required=False, initial=True, label=_("Copy subpages"),
help_text=ngettext(
"This will copy %(count)s subpage.",
"This will copy %(count)s subpages.",
subpage_count) % {'count': subpage_count})
if can_publish:
pages_to_publish_count = pages_to_copy.live().count()
if pages_to_publish_count > 0:
# In the specific case that there are no subpages, customise the field label and help text
if subpage_count == 0:
label = _("Publish copied page")
help_text = _("This page is live. Would you like to publish its copy as well?")
else:
label = _("Publish copies")
help_text = ngettext(
"%(count)s of the pages being copied is live. Would you like to publish its copy?",
"%(count)s of the pages being copied are live. Would you like to publish their copies?",
pages_to_publish_count) % {'count': pages_to_publish_count}
self.fields['publish_copies'] = forms.BooleanField(
required=False, initial=False, label=label, help_text=help_text
)
# Note that only users who can publish in the new parent page can create an alias.
# This is because alias pages must always match their original page's state.
self.fields['alias'] = forms.BooleanField(
required=False, initial=False, label=_("Alias"),
help_text=_("Keep the new pages updated with future changes")
)
def clean(self):
cleaned_data = super().clean()
# Make sure the slug isn't already in use
slug = cleaned_data.get('new_slug')
# New parent page given in form or parent of source, if parent_page is empty
parent_page = cleaned_data.get('new_parent_page') or self.page.get_parent()
# check if user is allowed to create a page at given location.
if not parent_page.permissions_for_user(self.user).can_add_subpage():
self._errors['new_parent_page'] = self.error_class([
_("You do not have permission to copy to page \"%(page_title)s\"") % {'page_title': parent_page.specific_deferred.get_admin_display_title()}
])
# Count the pages with the same slug within the context of our copy's parent page
if slug and parent_page.get_children().filter(slug=slug).count():
self._errors['new_slug'] = self.error_class(
[_("This slug is already in use within the context of its parent page \"%s\"") % parent_page]
)
# The slug is no longer valid, hence remove it from cleaned_data
del cleaned_data['new_slug']
# Don't allow recursive copies into self
if cleaned_data.get('copy_subpages') and (self.page == parent_page or parent_page.is_descendant_of(self.page)):
self._errors['new_parent_page'] = self.error_class(
[_("You cannot copy a page into itself when copying subpages")]
)
return cleaned_data
class PageViewRestrictionForm(BaseViewRestrictionForm):
class Meta:
model = PageViewRestriction
fields = ('restriction_type', 'password', 'groups')
class WagtailAdminPageForm(WagtailAdminModelForm):
comment_notifications = forms.BooleanField(widget=forms.CheckboxInput(), required=False)
# Could be set to False by a subclass constructed by TabbedInterface
show_comments_toggle = True
class Meta:
# (dealing with Treebeard's tree-related fields that really should have
# been editable=False)
exclude = ['content_type', 'path', 'depth', 'numchild']
def __init__(self, data=None, files=None, parent_page=None, subscription=None, *args, **kwargs):
self.subscription = subscription
initial = kwargs.pop('initial', {})
if self.subscription:
initial['comment_notifications'] = subscription.comment_notifications
super().__init__(data, files, *args, initial=initial, **kwargs)
self.parent_page = parent_page
if not self.show_comments_toggle:
del self.fields['comment_notifications']
def save(self, commit=True):
# Save comment notifications updates to PageSubscription
if self.show_comments_toggle and self.subscription:
self.subscription.comment_notifications = self.cleaned_data['comment_notifications']
if commit:
self.subscription.save()
return super().save(commit=commit)
def is_valid(self):
comments = self.formsets.get('comments')
# Remove the comments formset if the management form is invalid
if comments and not comments.management_form.is_valid():
del self.formsets['comments']
return super().is_valid()
def clean(self):
cleaned_data = super().clean()
if 'slug' in self.cleaned_data:
if not Page._slug_is_available(
cleaned_data['slug'], self.parent_page, self.instance
):
self.add_error('slug', forms.ValidationError(_("This slug is already in use")))
# Check scheduled publishing fields
go_live_at = cleaned_data.get('go_live_at')
expire_at = cleaned_data.get('expire_at')
# Go live must be before expire
if go_live_at and expire_at:
if go_live_at > expire_at:
msg = _('Go live date/time must be before expiry date/time')
self.add_error('go_live_at', forms.ValidationError(msg))
self.add_error('expire_at', forms.ValidationError(msg))
# Expire at must be in the future
if expire_at and expire_at < timezone.now():
self.add_error('expire_at', forms.ValidationError(_('Expiry date/time must be in the future')))
# Don't allow an existing first_published_at to be unset by clearing the field
if 'first_published_at' in cleaned_data and not cleaned_data['first_published_at']:
del cleaned_data['first_published_at']
return cleaned_data
|
the-stack_0_22497 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('print_options03.xlsx')
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with print options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.center_vertically()
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
|
the-stack_0_22498 | '''
numpy implementation of 2d box intersection with test cases.
to demonstrate the idea and validate the result of torch implementation
author: lanxiao li
2020.8
'''
import numpy as np
import matplotlib.pyplot as plt
EPSILON = 1e-8
def line_seg_intersection(line1:np.array, line2:np.array):
"""find intersection of 2 lines defined by their end points
Args:
line1 (np.array): (2, 2), end points of line
line2 (np.array): (2, 2), end points of line
Returns:
        intersection: coordinate of the intersection point. None if it does not exist.
"""
# https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
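    # t and u are the parametric positions of the crossing point along line1 and
    # line2 respectively; the segments only intersect if both lie in [0, 1].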
assert line1.shape == (2,2)
assert line2.shape == (2,2)
x1, y1 = line1[0,:]
x2, y2 = line1[1,:]
x3, y3 = line2[0,:]
x4, y4 = line2[1,:]
num = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)
if np.abs(num) < EPSILON:
return None
den_t = (x1-x3)*(y3-y4) - (y1-y3)*(x3-x4)
t = den_t / num
if t < 0 or t > 1:
return None
den_u = (x1-x2)*(y1-y3) - (y1-y2)*(x1-x3)
u = - den_u / num
if u < 0 or u > 1:
return None
return [x1+t*(x2-x1), y1+t*(y2-y1)]
def box2corners(x, y, w, h, alpha):
"""
box parameters to four box corners
"""
x4 = np.array([0.5, -0.5, -0.5, 0.5]) * w
y4 = np.array([0.5, 0.5, -0.5, -0.5]) * h
corners = np.stack([x4, y4], axis=1)
sin = np.sin(alpha)
cos = np.cos(alpha)
R = np.array([[cos, -sin],[sin, cos]])
rotated = corners @ R.T
rotated[:, 0] += x
rotated[:, 1] += y
return rotated
def box_intersection(corners1, corners2):
"""find intersection points pf two boxes
Args:
corners1 (np.array): 4x2 coordinates of corners
corners2 (np.array): 4x2 coordinates of corners
Returns:
inters (4, 4, 2): (i, j, :) means intersection of i-th edge of box1 with j-th of box2
mask (4, 4) bool: (i, j) indicates if intersection exists
"""
assert corners1.shape == (4,2)
assert corners2.shape == (4,2)
inters = np.zeros([4,4,2]) # edges of box1, edges of box2, coordinates
mask = np.zeros([4,4]).astype(np.bool)
for i in range(4):
line1 = np.stack([corners1[i, :], corners1[(i+1)%4, :]], axis=0)
for j in range(4):
line2 = np.stack([corners2[j, :], corners2[(j+1)%4, :]], axis=0)
it = line_seg_intersection(line1, line2)
if it is not None:
inters[i, j, :] = it
mask[i, j] = True
return inters, mask
def point_in_box(point, corners):
"""check if a point lies in a rectangle defined by corners.
idea: check projection
Args:
point (2,): coordinate of point
corners (4, 2): coordinate of corners
Returns:
True if point in box
"""
assert corners.shape == (4,2)
a = corners[0, :]
b = corners[1, :]
d = corners[3, :]
ab = b - a
am = point - a
ad = d - a
# consider projection of AM on the edge AB and AD
p_ab = np.dot(ab, am)
norm_ab = np.dot(ab, ab)
p_ad = np.dot(ad, am)
norm_ad = np.dot(ad, ad)
cond1 = p_ab > 0 and p_ab < norm_ab
cond2 = p_ad > 0 and p_ad < norm_ad
return cond1 and cond2
def box_in_box(corners1, corners2):
"""check if corners of 2 boxes lie in each other
Args:
corners1 (np.array): 4x2 coordinates of corners
corners2 (np.array): 4x2 coordinates of corners
Returns:
c1_in_2 (4, ): i-th corner of box1 in box2
c2_in_1 (4, ): i-th corner of box2 in box1
"""
assert corners1.shape == (4,2)
assert corners2.shape == (4,2)
c1_in_2 = np.zeros((4,)).astype(np.bool)
c2_in_1 = np.zeros((4,)).astype(np.bool)
for i in range(4):
if point_in_box(corners1[i, :], corners2):
c1_in_2[i] = True
if point_in_box(corners2[i, :], corners1):
c2_in_1[i] = True
return c1_in_2, c2_in_1
def intersection_poly(corners1, corners2):
"""find all vertices of the polygon for intersection of 2 boxes
vertices include intersection points of edges and box corner in the other box
Args:
corners1 (np.array): 4x2 coordinates of corners
corners2 (np.array): 4x2 coordinates of corners
Returns:
poly_vertices (N, 2): vertices of polygon
"""
# corner1 = box2corners(*box1)
# corner2 = box2corners(*box2)
c1_in_2, c2_in_1 = box_in_box(corners1, corners2)
corners_eff = np.concatenate([corners1[c1_in_2,:], corners2[c2_in_1,:]], axis=0)
inters, mask = box_intersection(corners1, corners2)
inters_lin = np.reshape(inters, (-1, 2))
mask_lin = np.reshape(mask, (-1, ))
inter_points = inters_lin[mask_lin, :]
poly_vertices = np.concatenate([corners_eff, inter_points], axis=0)
return poly_vertices
def compare_vertices(v1, v2):
"""compare two points according to the its angle around the origin point
of coordinate system. Useful for sorting vertices in anti-clockwise order
Args:
v1 (2, ): x1, y1
v2 (2, ): x2, y2
Returns:
int : 1 if angle1 > angle2. else -1
"""
x1, y1 = v1
x2, y2 = v2
n1 = np.sqrt(x1*x1 + y1*y1) + EPSILON
n2 = np.sqrt(x2*x2 + y2*y2) + EPSILON
if y1 > 0 and y2 < 0:
return -1
elif y1 < 0 and y2 > 0:
return 1
elif y1 > 0 and y2 > 0:
if x1/n1 < x2/n2:
return 1
else:
return -1
else:
if x1/n1 > x2/n2:
return 1
else:
return -1
import functools
def vertices2area(vertices):
"""sort vertices in anti-clockwise order and calculate the area of polygon
Args:
vertices (N, 2) with N>2: vertices of a convex polygon
Returns:
area: area of polygon
ls: sorted vertices (normalized to centroid)
"""
mean = np.mean(vertices, axis=0, keepdims=True)
vertices_normalized = vertices - mean
    # sort vertices in anti-clockwise order around the centroid
ls = np.array(list(sorted(vertices_normalized, key=functools.cmp_to_key(compare_vertices))))
ls_ext = np.concatenate([ls, ls[0:1, :]], axis=0)
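    # shoelace formula: area = |sum(x_i * y_{i+1} - x_{i+1} * y_i)| / 2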
total = ls_ext[0:-1, 0]*ls_ext[1:, 1] - ls_ext[1:, 0] * ls_ext[0:-1, 1]
total = np.sum(total)
area = np.abs(total) / 2
return area, ls
def box_intersection_area(box1, box2):
corners1 = box2corners(*box1)
corners2 = box2corners(*box2)
v = intersection_poly(corners1, corners2)
if v.shape[0] < 3:
return 0
else:
return vertices2area(v)
# --------------------------------------------------------
# tests
# --------------------------------------------------------
def test_line_seg_intersection():
line1 = np.array([[0, 0], [0, 1]])
line2 = np.array([[1, 0], [1, 1]])
line3 = np.array([[0, 0], [1, 1]])
line4 = np.array([[1, 0], [0, 1]])
line5 = np.array([[0, 0], [1, 0]])
line6 = np.array([[0, 1], [1, 0.5]])
print(line_seg_intersection(line1, line2))
print(line_seg_intersection(line3, line4))
print(line_seg_intersection(line5, line6))
def test_box2corners():
corners = box2corners(1, 1, 2, 3, np.pi/6)
plt.figure()
plt.scatter(corners[:, 0], corners[:, 1])
for i in range(corners.shape[0]):
plt.text(corners[i, 0], corners[i, 1], str(i))
plt.axis("equal")
plt.show()
corners = box2corners(3, 1, 4, 2, np.pi/4)
plt.figure()
plt.scatter(corners[:, 0], corners[:, 1])
for i in range(corners.shape[0]):
plt.text(corners[i, 0], corners[i, 1], str(i))
plt.axis("equal")
plt.show()
def test_box_intersection(box1, box2):
corners1 = box2corners(*box1)
corners2 = box2corners(*box2)
inters, mask = box_intersection(corners1, corners2)
num_inters = np.sum(mask.astype(np.int))
inters_lin = np.reshape(inters, (-1, 2))
mask_lin = np.reshape(mask, (-1, ))
inter_points = inters_lin[mask_lin, :]
print("find %d intersections"%num_inters)
corners1 = box2corners(*box1)
corners2 = box2corners(*box2)
plt.figure()
plt.scatter(corners1[:, 0], corners1[:, 1])
plt.scatter(corners2[:, 0], corners2[:, 1])
plt.scatter(inter_points[:, 0], inter_points[:, 1], marker='x')
for i in range(corners1.shape[0]):
plt.text(corners1[i, 0], corners1[i, 1], str(i))
for i in range(corners2.shape[0]):
plt.text(corners2[i, 0], corners2[i, 1], str(i))
plt.axis("equal")
plt.show()
def test_point_in_box():
p = np.random.rand(5000, 2)
p[:, 0] *= 12
p[:, 0] -= 5
p[:, 1] *= 12
p[:, 1] -= 5
corners = box2corners(3, 1, 4, 2, np.pi/4)
plt.figure()
plt.scatter(corners[:, 0], corners[:, 1])
for i in range(corners.shape[0]):
plt.text(corners[i, 0], corners[i, 1], str(i))
mask = [point_in_box(x, corners) for x in p]
plt.scatter(p[mask, 0], p[mask, 1], marker="x")
plt.axis("equal")
plt.show()
def test_intersection_area(box1, box2):
area, corners = box_intersection_area(box1, box2)
print(area)
print(corners)
plt.figure()
plt.scatter(corners[:, 0], corners[:, 1])
for i in range(corners.shape[0]):
plt.text(corners[i, 0], corners[i, 1], str(i))
plt.axis("equal")
plt.show()
if __name__ == "__main__":
# test_line_seg_intersection()
# test_box2corners()
# test_point_in_box()
box1 = np.array([0, 0, 2, 3, np.pi/6])
box2 = np.array([1, 1, 4, 4, -np.pi/4])
test_box_intersection(box1, box2)
test_intersection_area(box1, box2)
|
the-stack_0_22501 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
"""
Example IceTray script for performing reconstructions
"""
from __future__ import absolute_import, division, print_function
__author__ = "P. Eller"
__license__ = """Copyright 2017-2019 Justin L. Lanfranchi and Philipp Eller
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from argparse import ArgumentParser
from os.path import abspath, dirname
import sys
from icecube import dataclasses, icetray, dataio # pylint: disable=unused-import
from I3Tray import I3Tray
if __name__ == "__main__" and __package__ is None:
RETRO_DIR = dirname(dirname(abspath(__file__)))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro import __version__, init_obj
from retro.reco import Reco
def main():
"""Script to run Retro recos in icetray"""
parser = ArgumentParser()
parser.add_argument(
"--input-i3-file", type=str,
required=True,
nargs="+",
help="""Input I3 file""",
)
parser.add_argument(
"--output-i3-file", type=str,
required=True,
help="""Output I3 file""",
)
split_kwargs = init_obj.parse_args(dom_tables=True, tdi_tables=True, parser=parser)
other_kw = split_kwargs.pop("other_kw")
# instantiate Retro reco object
my_reco = Reco(**split_kwargs)
tray = I3Tray()
tray.AddModule(
_type="I3Reader",
_name="reader",
FilenameList=other_kw["input_i3_file"],
)
tray.Add(
_type=my_reco,
_name="retro",
methods="crs_prefit",
reco_pulse_series_name="SRTTWOfflinePulsesDC",
hit_charge_quant=0.05,
min_hit_charge=0.25,
seeding_recos=["L5_SPEFit11", "LineFit_DC"],
triggers=["I3TriggerHierarchy"],
additional_keys=["L5_oscNext_bool"],
filter='event["header"]["L5_oscNext_bool"] and len(event["hits"]) >= 8',
point_estimator="median",
)
tray.AddModule(
_type="I3Writer",
_name="writer",
DropOrphanStreams=[icetray.I3Frame.DAQ],
filename=other_kw["output_i3_file"],
)
tray.AddModule(_type="TrashCan", _name="GoHomeYouReDrunk")
tray.Execute()
tray.Finish()
if __name__ == "__main__":
main()
|
the-stack_0_22502 | import asyncio
import io
import os
import stat
import tempfile
import pytest
from whitenoise.asgi import convert_asgi_headers, convert_wsgi_headers, read_file, receive_request, serve_static_file, \
AsgiWhiteNoise
from whitenoise.responders import StaticFile
from .test_whitenoise import application as whitenoise_application, files
@pytest.fixture()
def loop():
return asyncio.get_event_loop()
class MockStat:
def __init__(self, st_mode, st_size, st_mtime):
self.st_mode = st_mode
self.st_size = st_size
self.st_mtime = st_mtime
@pytest.fixture()
def static_file_sample():
content = b"01234567890123456789"
modification_time = "Sun, 09 Sep 2001 01:46:40 GMT"
modification_epoch = 1000000000
temporary_file = tempfile.NamedTemporaryFile(suffix=".js", delete=False)
try:
temporary_file.write(content)
temporary_file.close()
stat_cache = {
temporary_file.name: MockStat(stat.S_IFREG, len(content), modification_epoch)
}
static_file = StaticFile(temporary_file.name, [], stat_cache=stat_cache)
yield {
"static_file": static_file,
"content": content,
"content_length": len(content),
"modification_time": modification_time,
}
finally:
os.unlink(temporary_file.name)
@pytest.fixture(params=["GET", "HEAD"])
def method(request):
return request.param
@pytest.fixture(params=[10, 20])
def block_size(request):
return request.param
@pytest.fixture()
def file_not_found():
async def application(scope, receive, send):
if scope["type"] != "http":
raise RuntimeError()
await receive()
await send({"type": "http.response.start", "status": 404})
await send({"type": "http.response.body", "body": b"Not found"})
return application
@pytest.fixture()
def websocket():
async def application(scope, receive, send):
if scope["type"] != "websocket":
raise RuntimeError()
await receive()
await send({"type": "websocket.accept"})
await send({"type": "websocket.close"})
return application
class Receiver:
def __init__(self):
self.events = [{"type": "http.request"}]
async def __call__(self):
return self.events.pop(0)
class Sender:
def __init__(self):
self.events = []
async def __call__(self, event):
self.events.append(event)
@pytest.fixture()
def receive():
return Receiver()
@pytest.fixture()
def send():
return Sender()
def test_asgiwhitenoise(loop, receive, send, method, whitenoise_application, files):
asgi_whitenoise = AsgiWhiteNoise(whitenoise_application, None)
scope = {
"type": "http",
"path": "/" + files.js_path,
"headers": [],
"method": method,
}
loop.run_until_complete(asgi_whitenoise(scope, receive, send))
assert receive.events == []
assert send.events[0]["status"] == 200
if method == "GET":
assert send.events[1]["body"] == files.js_content
def test_asgiwhitenoise_not_found(loop, receive, send, whitenoise_application, file_not_found):
asgi_whitenoise = AsgiWhiteNoise(whitenoise_application, file_not_found)
scope = {
"type": "http",
"path": "/static/foo.js",
"headers": [],
"method": "GET",
}
loop.run_until_complete(asgi_whitenoise(scope, receive, send))
assert receive.events == []
assert send.events == [
{"type": "http.response.start", "status": 404},
{"type": "http.response.body", "body": b"Not found"},
]
def test_asgiwhitenoise_not_http(loop, receive, send, whitenoise_application, websocket):
asgi_whitenoise = AsgiWhiteNoise(whitenoise_application, websocket)
receive.events = [{"type": "websocket.connect"}]
scope = {
"type": "websocket",
"path": "/endpoint",
"headers": [],
"method": "GET",
}
loop.run_until_complete(asgi_whitenoise(scope, receive, send))
assert receive.events == []
assert send.events == [
{"type": "websocket.accept"},
{"type": "websocket.close"},
]
def test_serve_static_file(loop, send, method, block_size, static_file_sample):
loop.run_until_complete(serve_static_file(send, static_file_sample["static_file"], method, {}, block_size))
expected_events = [
{
"type": "http.response.start",
"status": 200,
"headers": [
(b"last-modified", static_file_sample["modification_time"].encode()),
(b"etag", static_file_sample["static_file"].etag.encode()),
(b"content-length", str(static_file_sample["content_length"]).encode()),
],
}]
if method == "GET":
for start in range(0, static_file_sample["content_length"], block_size):
expected_events.append({
"type": "http.response.body",
"body": static_file_sample["content"][start:start + block_size],
"more_body": True,
})
expected_events.append({"type": "http.response.body"})
assert send.events == expected_events
def test_receive_request(loop, receive):
loop.run_until_complete(receive_request(receive))
assert receive.events == []
def test_receive_request_with_more_body(loop, receive):
receive.events = [
{"type": "http.request", "more_body": True, "body": b"content"},
{"type": "http.request", "more_body": True, "body": b"more content"},
{"type": "http.request"},
]
loop.run_until_complete(receive_request(receive))
assert receive.events == []
def test_receive_request_with_invalid_event(loop, receive):
receive.events = [{"type": "http.weirdstuff"}]
with pytest.raises(RuntimeError):
loop.run_until_complete(receive_request(receive))
def test_read_file():
content = io.BytesIO(b"0123456789")
content.seek(4)
blocks = list(read_file(content, content_length=5, block_size=2))
assert blocks == [b"45", b"67", b"8"]
def test_read_too_short_file():
content = io.BytesIO(b"0123456789")
content.seek(4)
with pytest.raises(RuntimeError):
list(read_file(content, content_length=11, block_size=2))
def test_convert_asgi_headers():
wsgi_headers = convert_asgi_headers([
(b"accept-encoding", b"gzip,br"),
(b"range", b"bytes=10-100"),
])
assert wsgi_headers == {
"HTTP_ACCEPT_ENCODING": "gzip,br",
"HTTP_RANGE": "bytes=10-100",
}
def test_convert_wsgi_headers():
wsgi_headers = convert_wsgi_headers([
("Content-Length", "1234"),
("ETag", "ada"),
])
assert wsgi_headers == [
(b"content-length", b"1234"),
(b"etag", b"ada"),
]
|
the-stack_0_22503 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('print_options01.xlsx')
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with print options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.hide_gridlines(0)
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
|
the-stack_0_22505 | import requests
from bs4 import BeautifulSoup
def DownloadWord(word):
URL = 'https://www.lexico.com/en/definition/' + word
print(URL)
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
parents = soup.find_all('h3', class_='pronunciations')
for element in parents:
link = element.find('audio')['src']
audio = requests.get(link, allow_redirects=True)
        with open(word + '.mp3', 'wb') as audio_file:  # no type safety here, hopefully it's always mp3
            audio_file.write(audio.content)
inputFile = 'input.txt'
with open(inputFile) as fp:
line = fp.readline()
while line:
DownloadWord(line.strip())
line = fp.readline()
#keeps terminal open at the end
input() |
the-stack_0_22508 | from __future__ import print_function
import time
import Pyro4
print("Autoreconnect using PYRO uri.")
@Pyro4.expose
class TestClass(object):
def method(self, arg):
print("Method called with %s" % arg)
print("You can now try to stop this server with ctrl-C/ctrl-Break")
time.sleep(1)
# We are responsible for (re)registering objects with the same object Id,
# so that the client can reuse its PYRO-uri directly to reconnect.
# There are a few options, such as depending on the Name server to
# maintain a name registration for our object (see the serverNS for this).
# Or we could store our objects in our own persistent database.
# But for this example we will just use a pre-generated id (fixed name).
# The other thing is that your Daemon must re-bind on the same port.
# By default Pyro will select a random port so we specify a fixed port.
with Pyro4.core.Daemon(port=7777) as daemon:
uri = daemon.register(TestClass, objectId="example.autoreconnect_fixed_objectid")
print("Server started, uri: %s" % uri)
daemon.requestLoop()
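# Minimal client-side sketch (assumption: the server above is reachable on localhost:7777).
# Because both the object id and the daemon port are fixed, a client can keep reusing the
# same PYRO uri and simply retry the call after this server has been restarted:
#
#   import Pyro4
#   with Pyro4.Proxy("PYRO:example.autoreconnect_fixed_objectid@localhost:7777") as proxy:
#       proxy.method("hello")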
|
the-stack_0_22509 | # -*- coding: utf-8 -*-
"""
Navigation Drawer
=================
Copyright (c) 2015 Andrés Rodríguez and KivyMD contributors -
KivyMD library up to version 0.1.2
Copyright (c) 2019 Ivanov Yuri and KivyMD contributors -
KivyMD library version 0.1.3 and higher
For suggestions and questions:
<[email protected]>
This file is distributed under the terms of the same license,
as the Kivy framework.
`Material Design spec, Navigation drawer <https://material.io/design/components/navigation-drawer.html>`_
Example
-------
from kivy.app import App
from kivy.lang import Builder
from kivymd.navigationdrawer import NavigationDrawerIconButton
from kivymd.theming import ThemeManager
from kivymd.toast import toast
main_kv = '''
#:import MDToolbar kivymd.toolbar.MDToolbar
#:import MDNavigationDrawer kivymd.navigationdrawer.MDNavigationDrawer
#:import NavigationDrawerSubheader kivymd.navigationdrawer.NavigationDrawerSubheader
<ContentNavigationDrawer@MDNavigationDrawer>
drawer_logo: 'demos/kitchen_sink/assets/drawer_logo.png'
NavigationDrawerSubheader:
text: "Menu:"
NavigationLayout:
id: nav_layout
ContentNavigationDrawer:
id: nav_drawer
BoxLayout:
orientation: 'vertical'
MDToolbar:
id: toolbar
title: 'KivyMD Kitchen Sink'
md_bg_color: app.theme_cls.primary_color
background_palette: 'Primary'
background_hue: '500'
elevation: 10
left_action_items:
[['dots-vertical', lambda x: app.root.toggle_nav_drawer()]]
Widget:
'''
class Example(App):
theme_cls = ThemeManager()
theme_cls.primary_palette = 'Blue'
title = "Navigation Drawer"
main_widget = None
def build(self):
self.main_widget = Builder.load_string(main_kv)
return self.main_widget
def callback(self, instance, value):
toast("Pressed item menu %d" % value)
def on_start(self):
for i in range(15):
self.main_widget.ids.nav_drawer.add_widget(
NavigationDrawerIconButton(
icon='checkbox-blank-circle', text="Item menu %d" % i,
on_release=lambda x, y=i: self.callback(x, y)))
Example().run()
"""
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import StringProperty, ObjectProperty, NumericProperty,\
ListProperty, BooleanProperty, OptionProperty
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivymd import images_path
from kivymd.elevation import RectangularElevationBehavior
from kivymd.icon_definitions import md_icons
from kivymd.label import MDLabel
from kivymd.list import BaseListItem, ILeftBody, OneLineListItem,\
OneLineIconListItem, IRightBody
from kivymd.theming import ThemableBehavior
from kivymd.toolbar import MDToolbar
from kivymd.vendor.navigationdrawer import NavigationDrawer as\
VendorNavigationDrawer
Builder.load_string('''
#:import OneLineIconListItem kivymd.list.OneLineIconListItem
#:import MDLabel kivymd.label.MDLabel
#:import colors kivymd.color_definitions.colors
#:import get_color_from_hex kivy.utils.get_color_from_hex
#:import ScrollView kivy.uix.scrollview.ScrollView
#:import Window kivy.core.window.Window
<NavigationDrawerToolbar>
elevation: 0
specific_text_color: root.theme_cls.secondary_text_color
opposite_colors: False
title_theme_color: 'Secondary'
md_bg_color: root.theme_cls.bg_light
canvas:
Color:
rgba: root.theme_cls.divider_color
Line:
points: self.x, self.y, self.x+self.width,self.y
<NavigationLayout>
<MDNavigationDrawer>
_list: list
_drawer_logo: drawer_logo
_drawer_title: drawer_title
spacing: dp(5)
canvas:
Color:
rgba: root.theme_cls.bg_light
Rectangle:
size: root.size
pos: root.pos
canvas.before:
Color:
rgba: root.shadow_color
Rectangle:
size: Window.size
pos: 0, 0
Image:
id: drawer_logo
size_hint_y: .3
source: root.drawer_logo
allow_stretch: True
keep_ratio: False
MDLabel:
id: drawer_title
text: ' {}'.format(root.drawer_title)
font_style: 'H6'
size_hint_y: None
height: self.texture_size[1]
markup: True
theme_text_color: 'Primary'
MDSeparator:
id: sep
ScrollView:
id: scroll
size_hint_y: .7
GridLayout:
id: list
cols: 1
size_hint_y: None
height: self.minimum_height
<NavigationDrawerIconButton>
theme_text_color:
'Primary' if not root._active\
else 'Custom' if root.use_active else 'Primary'
text_color:
root.theme_cls.secondary_text_color\
if not root._active else root.active_color if\
root.active_color_type == "custom" else root._active_color\
if root.use_active else root.theme_cls.secondary_text_color
NDIconLabel:
id: _icon
font_style: 'Icon'
theme_text_color:
'Secondary' if not root._active\
else 'Custom' if root.use_active else 'Custom'
text_color:
root.theme_cls.secondary_text_color if not root._active\
else root.active_color if root.active_color_type == "custom"\
else root._active_color if root.use_active else\
root.theme_cls.secondary_text_color
BoxLayout:
id: _right_container
size_hint: None, None
x: root.x + root.width - _badge.texture_size[0] - dp(25)
y: root.y + root.height / 2 - self.height / 2
size: dp(70), root.height
NDBadgeLabel:
id: _badge
theme_text_color:
'Secondary' if not root._active else 'Custom'\
if root.use_active else 'Custom'
text_color:
root.theme_cls.secondary_text_color if not root._active\
else root.active_color if root.active_color_type == "custom"\
else root._active_color if root.use_active else\
root.theme_cls.secondary_text_color
text: root.badge_text
halign: 'right'
<NavigationDrawerDivider>
canvas:
Color:
rgba: self.theme_cls.divider_color
Line:
points: root.x, root.y + dp(8), root.x + self.width, root.y + dp(8)
''')
class NDIconLabel(ILeftBody, MDLabel):
pass
class NDBadgeLabel(IRightBody, MDLabel):
pass
class NavigationDrawerHeaderBase:
"""
Tells the :class:`~MDNavigationDrawer` that this should be
in the header area (above the :class:`~kivy.uix.scrollview.ScrollView`).
"""
pass
class NavigationDrawerToolbar(MDToolbar, NavigationDrawerHeaderBase):
def _update_specific_text_color(self, instance, value):
pass
class NavigationDrawerIconButton(OneLineIconListItem):
"""An item in the :class:`MDNavigationDrawer`."""
_active = BooleanProperty(False)
_active_color = ListProperty()
_icon = ObjectProperty()
divider = None
active_color = ListProperty()
"""Custom active color.
This option only takes effect when :attr:`active_color_type` = 'custom'.
:attr:`active_color` is a :class:`~kivy.properties.ListProperty`
and defaults to None.
"""
active_color_type = OptionProperty('primary',
options=['primary', 'accent', 'custom'])
"""Decides which color should be used for the active color.
This option only takes effect when :attr:`use_active` = True.
Options:
primary: Active color will be the primary theme color.
accent: Active color will be the theme's accent color.
custom: Active color will be taken from the :attr:`active_color` attribute.
:attr:`active_color_type` is a :class:`~kivy.properties.OptionProperty`
and defaults to 'primary'.
"""
icon = StringProperty('checkbox-blank-circle')
"""Icon that appears to the left of the widget.
:attr:`icon` is a :class:`~kivy.properties.StringProperty` and defaults
to 'checkbox-blank-circle'.
"""
badge_text = StringProperty('')
"""
Text that appears on the right side of the item, usually
for displaying a count of sorts.
:attr:`badge_text` is a :class:`~kivy.properties.StringProperty`
and defaults to ''.
"""
use_active = BooleanProperty(True)
"""If the button should change to the active color when selected.
:attr:`use_active` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True.
See also:
:attr:`active_color`
:attr:`active_color_type`
"""
# active_color = get_color_from_hex(colors['Red']['500'])
# active_color_type = 'custom'
def __init__(self, **kwargs):
super(NavigationDrawerIconButton, self).__init__(**kwargs)
self._set_active_color()
self.theme_cls.bind(primary_color=self._set_active_color_primary,
accent_color=self._set_active_color_accent)
Clock.schedule_once(lambda x: self.on_icon(self, self.icon))
def _set_active(self, active, nav_drawer):
if self.use_active:
self._active = active
if nav_drawer.active_item != self:
if nav_drawer.active_item is not None:
nav_drawer.active_item._active = False
nav_drawer.active_item = self
def _set_active_color(self, *args):
if self.active_color_type == 'primary':
self._set_active_color_primary()
elif self.active_color_type == 'accent':
self._set_active_color_accent()
# Note to future developers/myself: These must be separate functions
def _set_active_color_primary(self, *args):
if self.active_color_type == 'primary':
self._active_color = self.theme_cls.primary_color
def _set_active_color_accent(self, *args):
if self.active_color_type == 'accent':
self._active_color = self.theme_cls.accent_color
def on_icon(self, instance, value):
self.ids['_icon'].text = u'{}'.format(md_icons[value])
def on_active_color_type(self, *args):
self._set_active_color(args)
class NavigationDrawerSubheader(OneLineListItem):
"""
A subheader for separating content in :class:`MDNavigationDrawer`
Works well alongside :class:`NavigationDrawerDivider`
"""
disabled = True
divider = None
theme_text_color = 'Secondary'
class NavigationDrawerDivider(OneLineListItem):
"""
A small full-width divider that can be placed
in the :class:`MDNavigationDrawer`
"""
disabled = True
divider = None
_txt_top_pad = NumericProperty(dp(8))
_txt_bot_pad = NumericProperty(dp(8))
def __init__(self, **kwargs):
super(OneLineListItem, self).__init__(**kwargs)
self.height = dp(16)
class MDNavigationDrawer(BoxLayout, ThemableBehavior,
RectangularElevationBehavior):
_elevation = NumericProperty(0)
_list = ObjectProperty()
_drawer_logo = ObjectProperty()
_drawer_title = ObjectProperty()
active_item = ObjectProperty(None)
orientation = 'vertical'
panel = ObjectProperty()
drawer_logo = StringProperty()
drawer_title = StringProperty()
shadow_color = ListProperty([0, 0, 0, 0])
use_logo = OptionProperty('all', options=['logo', 'label', 'all'])
def __init__(self, **kwargs):
super(MDNavigationDrawer, self).__init__(**kwargs)
def on_use_logo(self, instance, value):
if value == 'label':
self.remove_widget(self.ids.drawer_logo)
elif value == 'logo':
self.remove_widget(self.ids.drawer_title)
self.remove_widget(self.ids.sep)
def add_widget(self, widget, **kwargs):
"""
If the widget is a subclass of :class:`~NavigationDrawerHeaderBase`,
then it will be placed above the
:class:`~kivy.uix.scrollview.ScrollView`.
Otherwise, it will be placed in the main
:class:`~kivy.uix.scrollview.ScrollView`
content area.
"""
if issubclass(widget.__class__, BaseListItem):
self._list.add_widget(widget, **kwargs)
if len(self._list.children) == 1:
widget._active = True
self.active_item = widget
widget.bind(on_release=lambda x: self.panel.toggle_state())
try:
widget.bind(on_release=lambda x: x._set_active(True, self))
except AttributeError:
pass
else:
super(MDNavigationDrawer, self).add_widget(widget, **kwargs)
class NavigationLayout(VendorNavigationDrawer, ThemableBehavior):
"""The container layout that manages the :class:`MDNavigationDrawer`."""
opening_transition = StringProperty('out_sine')
closing_transition = StringProperty('out_sine')
min_dist_to_open = NumericProperty(.2)
min_dist_to_close = NumericProperty(.8)
anim_time = NumericProperty(.2)
separator_image = StringProperty(
'{}'.format(images_path + '/transparent.png'))
side_panel_positioning = 'left'
side_panel_width = (dp(320) * 80) // 100\
if dp(320) >= Window.width else dp(320)
max_shadow_opacity = NumericProperty(.5)
anim_type = StringProperty('slide_above_simple')
def __init__(self, **kwargs):
super(NavigationLayout, self).__init__(**kwargs)
self.on_anim_type()
def _anim_relax(self):
if self.state == 'open':
if self._anim_progress < self.min_dist_to_close:
self.anim_to_state('closed')
else:
self.anim_to_state('open')
else:
if self._anim_progress > self.min_dist_to_open:
self.anim_to_state('open')
else:
self.anim_to_state('closed')
def on__anim_progress(self, *args):
self.side_panel.shadow_color = [
0, 0, 0, self.max_shadow_opacity*self._anim_progress]
self.side_panel.elevation = 1 * self._anim_progress
if self._anim_progress > 1:
self._anim_progress = 1
elif self._anim_progress < 0:
self._anim_progress = 0
if self._anim_progress >= 1:
self.state = 'open'
elif self._anim_progress <= 0:
self.state = 'closed'
def add_widget(self, widget, **kwargs):
"""
First widget added must be the content for the side/sliding panel.
The next widget must be the main content.
This layout only accepts two widgets, any more than two widgets will
raise a ValueError
"""
# Internal default BoxLayouts
if len(self.children) == 0:
super(NavigationLayout, self).add_widget(widget, **kwargs)
self._side_panel = widget
elif len(self.children) == 1:
super(NavigationLayout, self).add_widget(widget, **kwargs)
self._main_panel = widget
elif len(self.children) == 2:
super(NavigationLayout, self).add_widget(widget, **kwargs)
self._join_image = widget
# Adding of user widgets
elif self.side_panel is None:
self.set_side_panel(widget)
widget.panel = self
elif self.main_panel is None:
self.set_main_panel(widget)
else:
raise ValueError(
'Can\'t add more than two widgets directly to NavigationLayout')
def toggle_nav_drawer(self):
self.toggle_state(True)
|
the-stack_0_22510 | originalFile = open('original-image.txt', "r")
challengeFile = open('../challenge/challenge.txt', "w")
outputString = ''
secretMsg = ' flag{psych0-t3xt}'
secretMsgCounter = 0
spacer = 50
for i, c in enumerate(originalFile.read()):
if i % spacer == 0 and secretMsgCounter < len(secretMsg):
outputString += secretMsg[secretMsgCounter]
secretMsgCounter+=1
else:
outputString += c
challengeFile.write(outputString)
challengeFile.close() |
the-stack_0_22511 | import os
import re
import sys
import h5py
import time
import json
import logging
import argparse
import pickle as pkl
import multiprocessing
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from nglib.common import utils
from nglib.corpora import GigawordNYT
from nglib.narrative.narrative_graph import NarrativeGraph, NGNode
from nglib.narrative.narrative_graph import get_next_relations
from nglib.narrative.narrative_graph import create_narrative_graph
from nglib.narrative.narrative_graph import get_ng_bert_tokenizer
def get_arguments(argv):
parser = argparse.ArgumentParser(description='prepare narrative graphs')
parser.add_argument('config_file', metavar='CONFIG_FILE',
help='config file of sentence sampling')
parser.add_argument('output_dir', metavar='OUTPUT_DIR',
help='output dir')
parser.add_argument('--target_split', type=str, default=None,
choices=['train', 'dev', 'test'],
help='target split (train, dev, test)')
parser.add_argument("--bert_weight_name",
default='google/bert_uncased_L-2_H-128_A-2', type=str,
help="bert weight version")
parser.add_argument("--max_seq_len", default=128, type=int,
help="max sequence length for BERT encoder")
parser.add_argument("--min_coref_chain_len", default=9, type=int,
help="min coref chain len (default 9)")
parser.add_argument("--instance_min", default=20, type=int,
help="minimum number of instances (default 20)")
parser.add_argument("--instance_max", default=350, type=int,
help="maximum number of instances (default 350)")
parser.add_argument('--is_cased', action='store_true', default=False,
help='BERT is case sensitive')
parser.add_argument('--save_ng_pkl', action='store_true', default=False,
help='save narrative graph pickle (space-consuming)')
parser.add_argument('--no_discourse', action='store_true', default=False,
help='no discourse relations')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='show info messages')
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='show debug messages')
args = parser.parse_args(argv)
return args
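# Example invocation (illustrative; the config path, output dir and split are placeholders):
#   python prepare_narrative_graphs.py narrative_graph_config.json /path/to/out --target_split train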
def write_instance(
cur_gid, doc_id, bert_inputs, rgcn_inputs, coref_chains, fw_h5, fw_chains):
# rgcn_inputs
edge_src = np.array(rgcn_inputs['edge_src'])
edge_types = np.array(rgcn_inputs['edge_types'])
edge_dest = np.array(rgcn_inputs['edge_dest'])
edge_norms = np.array(rgcn_inputs['edge_norms'])
ng_edges = np.vstack((edge_src, edge_types, edge_dest, edge_norms))
fw_h5.create_dataset('graph_{}/ng_edges'.format(cur_gid), data=ng_edges)
# bert_inputs
binput = torch.stack((bert_inputs['input_ids'], bert_inputs['input_masks'], bert_inputs['token_type_ids']), dim=0)
# turn nid2rows into numpy array with -1 padding
max_len = 0
nid2rows = bert_inputs['nid2rows']
for nid in range(len(nid2rows)):
idxs = nid2rows[nid]
if len(idxs) > max_len:
max_len = len(idxs)
new_nid2rows = -1 * np.ones((len(nid2rows), max_len), dtype=np.int64)
for nid in range(len(nid2rows)):
idxs = nid2rows[nid]
new_nid2rows[nid, :len(idxs)] = idxs
fw_h5.create_dataset('graph_{}/bert_inputs'.format(cur_gid), data=binput.cpu().numpy())
fw_h5.create_dataset('graph_{}/bert_target_idxs'.format(cur_gid), data=bert_inputs['target_idxs'].cpu().numpy())
fw_h5.create_dataset('graph_{}/bert_nid2rows'.format(cur_gid), data=new_nid2rows)
if len(coref_chains) > 0:
# write h5
h5_cchains = []
for cchain in coref_chains:
h5_cchain = [e['nid'] for e in cchain]
h5_cchains.append(h5_cchain)
h5_cchains = chain2numpy(h5_cchains)
fw_h5.create_dataset(
'graph_{}/coref_chains'.format(cur_gid), data=h5_cchains)
# write json
out = {
'doc_id': doc_id,
'gid': cur_gid,
'chains': coref_chains
}
tmp = json.dumps(out)
fw_chains.write(tmp + '\n')
def chain2numpy(chains):
m = max([len(c) for c in chains])
arr = np.ones((len(chains), m), dtype=np.int64) * -1
for i in range(len(chains)):
arr[i][:len(chains[i])] = chains[i]
return arr
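# Illustrative padding behaviour of chain2numpy:
#   chain2numpy([[1, 2, 3], [4]]) -> array([[ 1,  2,  3],
#                                           [ 4, -1, -1]])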
def worker(pidx, g_graph_counts, g_rel_counts,
src_fpath, target_dir, prefix,
rtype2idx, dmarkers, no_entity):
logger = utils.get_root_logger(args, log_fname='{}.log'.format(prefix))
dest_fpath = os.path.join(target_dir, '{}.h5'.format(prefix))
chain_fpath = os.path.join(target_dir, '{}.json'.format(prefix))
docid_fpath = os.path.join(target_dir, '{}.txt'.format(prefix))
logger.info('processing {} -> {}'.format(src_fpath, dest_fpath))
bert_tokenizer = get_ng_bert_tokenizer(
args.bert_weight_name, args.is_cased)
t1 = time.time()
gid2docid = OrderedDict()
count_docs = 0
count_valid = 0
count_long_doc = 0
fw_chains = open(chain_fpath, 'w')
fw_h5 = h5py.File(dest_fpath, 'w')
with open(src_fpath, 'r') as fr:
for line in fr:
doc = json.loads(line)
count_docs += 1
# if count_docs == 1000: #debug
# break
if count_docs % 1000 == 0:
logger.info('p{}: count_docs={}, {} s'.format(
pidx, count_docs, time.time()-t1))
# filter by #nodes
ng, coref_chains = create_narrative_graph(
doc, rtypes=rtype2idx, dmarkers=dmarkers, no_entity=no_entity,
min_coref_chain_len=args.min_coref_chain_len
)
if (len(ng.nodes) < args.instance_min
or len(ng.nodes) > args.instance_max):
continue
bert_inputs, rgcn_inputs = ng.to_dgl_inputs(
bert_tokenizer, max_seq_len=args.max_seq_len)
if bert_inputs is None:
# sentence too long, skip this doc
count_long_doc += 1
logger.debug('p{}: sentence too long. skip the graph: {}'.format(
pidx, count_long_doc))
continue
if args.save_ng_pkl:
# only use it for small corpus
dpath = os.path.join(target_dir, 'ng_pickles')
if not os.path.exists(dpath):
os.makedirs(dpath)
fpath = os.path.join(dpath, '{}_{}.pkl'.format(
count_valid, doc['doc_id']))
logger.debug('saving {}...'.format(fpath))
pkl.dump(ng, open(fpath, 'wb'))
gid2docid[count_valid] = doc['doc_id']
write_instance(
count_valid, doc['doc_id'], bert_inputs, rgcn_inputs,
coref_chains, fw_h5, fw_chains)
estats = ng.get_edge_stats()
for rtype, c in estats.items():
g_rel_counts[rtype2idx[rtype]] += c
count_valid += 1
fw_h5.close()
fw_chains.close()
g_graph_counts[pidx] = count_valid
with open(docid_fpath, 'w') as fw:
for gid, docid in gid2docid.items():
fw.write('{}\t{}\n'.format(gid, docid))
logger.info('skip {} docs, because of long sentences.'.format(count_long_doc))
logger.info('{} / {} valid documents'.format(count_valid, count_docs))
logger.info('dump graphs: {} s'.format(time.time()-t1))
def get_marker2rtype(rtype2markers):
dmarkers = {}
for rtype, markers in rtype2markers.items():
for m in markers:
dmarkers[m] = rtype
return dmarkers
def main():
config = json.load(open(args.config_file))
assert config["config_target"] == "narrative_graph"
rtype2idx = config['rtype2idx']
if args.no_discourse:
dmarkers = None
else:
dmarkers = get_marker2rtype(config['discourse_markers'])
train_dir = os.path.join(config['nyt_dir'], 'train')
dev_dir = os.path.join(config['nyt_dir'], 'dev')
test_dir = os.path.join(config['nyt_dir'], 'test')
if args.target_split is None:
train_fs = sorted([os.path.join(train_dir, fn) for fn in os.listdir(train_dir)])
dev_fs = sorted([os.path.join(dev_dir, fn) for fn in os.listdir(dev_dir)])
test_fs = sorted([os.path.join(test_dir, fn) for fn in os.listdir(test_dir)])
fs = train_fs + dev_fs + test_fs
elif args.target_split == 'train':
fs = sorted([os.path.join(train_dir, fn) for fn in os.listdir(train_dir)])
elif args.target_split == 'dev':
fs = sorted([os.path.join(dev_dir, fn) for fn in os.listdir(dev_dir)])
elif args.target_split == 'test':
fs = sorted([os.path.join(test_dir, fn) for fn in os.listdir(test_dir)])
else:
raise ValueError('target split: {}'.format(args.target_split))
t1 = time.time()
g_graph_counts = multiprocessing.Array('i', len(fs))
all_g_rel_counts = []
ps = []
for pidx, src_fpath in enumerate(fs):
g_rel_counts = multiprocessing.Array('i', len(rtype2idx))
for i in range(len(rtype2idx)):
g_rel_counts[i] = 0
all_g_rel_counts.append(g_rel_counts)
sp = src_fpath.split('/')
fn = sp[-1]
split = sp[-2]
target_dir = os.path.join(args.output_dir, split)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
prefix = fn.split('.')[0]
# worker(pidx,
# g_graph_counts,
# g_rel_counts,
# src_fpath, target_dir,
# prefix,
# rtype2idx,
# dmarkers,
# config['no_entity']) # debug
p = multiprocessing.Process(target=worker,
args=(pidx,
g_graph_counts,
g_rel_counts,
src_fpath, target_dir,
prefix,
rtype2idx,
dmarkers,
config['no_entity'])
)
p.start()
ps.append(p)
for p in ps:
p.join()
# output stats
fpath = os.path.join(args.output_dir, 'stats.txt')
with open(fpath, 'w') as fw:
text = 'all done: {} s, {} docs'.format(time.time()-t1, sum(g_graph_counts))
print(text)
fw.write(text+'\n')
# merge all_g_rel_counts
all_rel_counts = [0] * len(rtype2idx)
for rcounts in all_g_rel_counts:
for i in range(len(rtype2idx)):
all_rel_counts[i] += rcounts[i]
for rtype, idx in rtype2idx.items():
text = '{}: {} rels'.format(rtype, all_rel_counts[idx])
print(text)
fw.write(text + '\n')
if __name__ == "__main__":
args = utils.bin_config(get_arguments)
main()
|
the-stack_0_22512 | import argparse
import glob
import os
import yaml
from rdkit import Chem
from rdkit.Chem import QED
from tqdm import tqdm
from yaml import load, Loader
from collections import ChainMap
import time
from multiprocessing import Pool
from functools import partial
class Library:
def __init__(self, path, filters=None,n_workers=100, save=False):
self.path = path # not using absolute path since we defined path on parse_args()
self.filters = filters
self.n_workers=n_workers
self.errors=0
#self.filt_yaml= open(,'r')
self.sd_files = self._retrieve_files() # not passing self.path since we access it within method
self.sd_files = self.split_in_chunks(self.sd_files,5000)
tqdm(self.parallelize(self.main,self.sd_files,self.n_workers))
def parallelize(self,func,iterable,n_workers, **kwargs):
f=partial(func,**kwargs)
if n_workers> 1:
#pool = Pool(n_workers)
with Pool(n_workers) as pool:
r= list(tqdm(pool.imap(func,iterable)))
pool.close()
pool.join()
return 0
else:
return list(map(f,iterable))
def split_in_chunks(self,sdf_files,n_chunks=25000):
output_folder=f"split_sdfs_{n_chunks}"
output_files=[]
for sdf_file in sdf_files:
print("Processing file", sdf_file)
name, ext = os.path.splitext(sdf_file)
name = name.replace("/","_")
mols = 0
splits = 1
if not os.path.exists(output_folder):
os.mkdir(output_folder)
fw = open(os.path.join(output_folder,name+f"_{splits}"+ext),'w')
output_files.append(os.path.join(output_folder,name+f"_{splits}"+ext))
with open(sdf_file) as f:
for line in f:
fw.write(line)
if line.startswith("$$$$"):
mols+=1
if mols == n_chunks:
mols=0
fw.close()
splits+=1
#print(os.path.join(output_folder, name+f"_{splits}"+ext))
fw=open(os.path.join(output_folder, name+f"_{splits}"+ext),'w')
output_files.append(os.path.join(output_folder, name+f"_{splits}"+ext))
return output_files
def main(self, file_sdf):
# deleted self.fragments since we can omit it
mols= Chem.SDMolSupplier(file_sdf,removeHs=False)
name = os.path.basename(file_sdf).rsplit(".")[0]
final_name="%s_filtered.sdf" % name
for mol in mols:
self.molecule = mol
self.fragments_dum = [Fragment(mol)]
self.filters_f()
self.save(output=final_name)
def filters_f(self): # transformed condition into method
self.parsed_filters = self._load_filters()
self.filtered_fragments = self._apply_filters()
def save(self, output='filtered_molecules.sdf'): # transformed condition into method
output= open(output,'w')
writer = Chem.SDWriter(output)
for mol in self.filtered_fragments:
writer.write(mol)
def _retrieve_files(self): # deleted argument path since we can access it within method
sdf_path = os.path.join(self.path, "*.sdf")
sd_files = glob.glob(sdf_path)
return sd_files
def _load_filters(self):
with open(self.filters, 'r') as filters_file:
yaml = load(filters_file, Loader=Loader)
filters=yaml["filters"]
for k in filters:
for key, value in k.items():
try:
k[key] = [v.strip() for v in value.split("--")]
except:
k[key] = value
filters = dict(ChainMap(*filters))
return filters
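    # Illustrative filters YAML this parser expects (hypothetical values; each
    # range is written as a single "min -- max" string, split on "--" above):
    #
    #   filters:
    #     - mw: "100 -- 500"
    #     - logP: "-3 -- 5"
    #     - hbd: "0 -- 5"
    #     - hba: "0 -- 10"
    #     - psa: "0 -- 140"
    #     - rotb: "0 -- 10"
    #     - arom: "0 -- 1"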
def _apply_filters(self):
filtered = []
for frag in self.fragments_dum:
try:
if float(self.parsed_filters['mw'][0])< frag.mw < float(self.parsed_filters['mw'][1]):
if float(self.parsed_filters['logP'][0]) < frag.logP < float(self.parsed_filters['logP'][1]):
if int(self.parsed_filters['hbd'][0]) < frag.hbd < int(self.parsed_filters['hbd'][1]):
if int(self.parsed_filters['hba'][0]) < frag.hba < int(self.parsed_filters['hba'][1]):
if int(self.parsed_filters['psa'][0]) < frag.psa < int(self.parsed_filters['psa'][1]):
if int(self.parsed_filters['rotb'][0]) <= frag.rotb <= int(self.parsed_filters['rotb'][1]):
if bool(self.parsed_filters['arom'][0]) == frag.arom:
filtered.append(self.molecule)
self.errors+=1
except:
self.errors+=1
return filtered
# deleted _save_sd method
class Fragment():
def __init__(self, mol=None, mw=None, logP=None, hba=None, hbd=None, psa=None, rotb=None, arom=None):
self.molecule = mol if mol else None
self.molecule_name = mol.GetProp('_Name') if mol else None
self._qed = QED.properties(self.molecule) if mol else None
self.mw = mw if not mol else self._qed.MW
self.logP = logP if not mol else self._qed.ALOGP
self.hba = hba if not mol else self._qed.HBA
self.hbd = hbd if not mol else self._qed.HBD
self.psa = psa if not mol else self._qed.PSA
self.rotb = rotb if not mol else self._qed.ROTB
self.arom = arom if not mol else self._qed.AROM
def __str__(self): #changed to str to ensure readability
return "Molecule {name}\nMW = {mw}\nlogP = {logp}\n".format(name=self.molecule_name, mw=self.mw, logp=self.logP)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dir", type=str, required=True, help="Directory with fragment SD files.")
parser.add_argument("--filters", type=str, required=False, help="YML file with filters")
parser.add_argument("--n_workers", type = int, required = False, help = "Number of CPUs")
args = parser.parse_args()
return os.path.abspath(args.dir), args.filters, args.n_workers
def main():
path, filters, n_workers= parse_args()
lib = Library(path, filters,n_workers)
errors = lib.errors
print("Number of wrong molecules: %s" % (lib.errors))
if __name__ == "__main__":
start_time=time.time()
main()
print(" --- %s seconds ---" % (time.time()-start_time))
|
the-stack_0_22513 | import torch
import argparse
METADATA_FILEPATH = '/home/schao/url/results-20210308-203457_clean_031521.csv'
TRAIN_CANCERS = ['BLCA', 'BRCA', 'COAD', 'HNSC', 'LUAD', 'LUSC', 'READ', 'STAD']
VAL_CANCERS = ['ACC', 'CHOL', 'ESCA', 'LIHC', 'KICH', 'KIRC', 'OV', 'UCS', 'UCEC']
PARAMS = ['RENORMALIZE', 'TRAIN_FRAC', 'VAL_FRAC', 'BATCH_SIZE', 'WAIT_TIME', 'MAX_BATCHES', 'PIN_MEMORY', 'N_WORKERS', 'RANDOM_SEED',
'TRAINING', 'LEARNING_RATE', 'WEIGHT_DECAY', 'DROPOUT', 'PATIENCE', 'FACTOR', 'N_EPOCHS', 'DISABLE_CUDA', 'DEVICE',
'OUT_DIM', 'MIN_TILES', 'NUM_TILES', 'LABEL', 'UNIT', 'POOL', 'CANCERS', 'METADATA', 'STATE_DICT', 'VAL_STATS',
'VAL_CANCERS', 'TEST_VAL', 'HID_DIM', 'FREEZE', 'PRETRAINED', 'RES_DICT', 'RES_DICT_NEW', 'GRAD_ADAPT',
'ETA', 'N_CHOOSE', 'N_STEPS', 'N_TESTTRAIN', 'N_TESTTEST', 'N_REPLICATES', 'TEST_BATCH_SIZE', 'RANDOMIZE', 'BRIGHTNESS', 'RESIZE', 'STEPS']
POOL_KEY = {
'max': torch.max,
'mean': torch.mean,
'lse': torch.logsumexp
}
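# Illustrative use of POOL_KEY (assumes `tile_feats` is a [n_tiles, dim] tensor):
#   pooled = POOL_KEY['mean'](tile_feats, dim=0)
# Note that torch.max returns a (values, indices) tuple and torch.logsumexp also
# requires a `dim` argument, so the caller is expected to handle each pool accordingly.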
def parse_args():
parser = argparse.ArgumentParser(description='WGD classifier')
# data parameters
parser.add_argument('--renormalize', default=False, action='store_true', help='whether to recompute mean and std of train set')
parser.add_argument('--train_frac', type=float, default=0.8, help='fraction of examples allocated to the train set')
parser.add_argument('--val_frac', type=float, default=0.2, help='fraction of examples allocated to the val set')
parser.add_argument('--batch_size', type=int, default=200, help='number of examples per batch')
parser.add_argument('--wait_time', type=int, default=1, help='number of batches collected before backward pass')
parser.add_argument('--max_batches', nargs='*', type=int, default=[-1, -1], help='max number of batches during train, val per epoch (-1: all)')
parser.add_argument('--pin_memory', default=False, action='store_true', help='whether to pin memory during data loading')
parser.add_argument('--n_workers', type=int, default=12, help='number of workers to use during data loading')
parser.add_argument('--random_seed', type=int, default=31321, help='random seed of the dataset and data filter')
# learning parameters
parser.add_argument('--training', default=False, action='store_true', help='whether to train the model')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='[local] learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight assigned to L2 regularizer')
parser.add_argument('--dropout', type=float, default=0.0, help='feed forward dropout rate')
parser.add_argument('--patience', type=int, default=1, help='number of epochs with no improvement before invoking scheduler, model reloading')
parser.add_argument('--factor', type=float, default=0.1, help='factor by which to reduce learning rate during scheduling')
parser.add_argument('--n_epochs', type=int, default=20, help='number of epochs to train the model')
parser.add_argument('--disable_cuda', default=False, action='store_true', help='whether or not to use GPU')
parser.add_argument('--device', type=str, default='0', help='CUDA device to use if use GPU')
# I/O parameters
parser.add_argument('--output_size', type=int, default=1, help='model output dimension')
parser.add_argument('--min_tiles', type=int, default=1, help='min number of tiles for patient to be included during sampling')
parser.add_argument('--num_tiles', type=int, default=400, help='number of tiles to keep (tile) or sample (slide) per patient')
parser.add_argument('--label', type=str, default='WGD', help='label on which to perform classification task')
parser.add_argument('--unit', type=str, default='tile', help='input unit, i.e., whether to train on tile or slide')
parser.add_argument('--pool', type=str, default=None, help='pooling mechanism to use if input unit is slide')
parser.add_argument('--cancers', nargs='*', type=str, default=TRAIN_CANCERS, help='list of cancers to include [in the train set]')
parser.add_argument('--infile', type=str, default=METADATA_FILEPATH, help='file path to metadata dataframe')
parser.add_argument('--outfile', type=str, default='/home/schao/temp.pt', help='file path to save the model state dict')
parser.add_argument('--statsfile', type=str, default='/home/schao/temp.pkl', help='file path to save the per-epoch val stats')
# task parameters
parser.add_argument('--val_cancers', nargs='*', type=str, default=VAL_CANCERS, help='list of cancers to include in the val set')
parser.add_argument('--test_val', default=False, action='store_true', help='whether to test non-meta-learned model on val cancers')
parser.add_argument('--skip', type=int, default=0, help='number of metaval or metatest loaders to skip')
parser.add_argument('--hidden_size', type=int, default=512, help='feed forward hidden size')
parser.add_argument('--freeze', default=False, action='store_true', help='whether to freeze the resnet layers')
parser.add_argument('--pretrained', default=False, action='store_true', help='whether to load the ImageNet-pretrained resnet')
parser.add_argument('--netfile', type=str, default=None, help='file path to full neural network')
parser.add_argument('--resfile', type=str, default=None, help='file path to resnet')
parser.add_argument('--resfile_new', type=str, default=None, help='file path to newly-trained resnet, if freeze is false')
parser.add_argument('--grad_adapt', default=False, action='store_true', help='whether to grad adapt non-meta-learned model during test')
# meta-learning parameters
parser.add_argument('--eta', type=float, default=0.01, help='global learning rate')
parser.add_argument('--n_choose', type=int, default=5, help='number of tasks to sample during every training epoch')
parser.add_argument('--n_steps', type=int, default=1, help='number of gradient steps to take on meta-test train set')
parser.add_argument('--n_testtrain', type=int, default=0, help='number of examples on which to train during meta-test time or train time')
parser.add_argument('--n_testtest', type=int, default=0, help='number of examples on which to test during meta-test time or test time')
parser.add_argument('--n_replicates', type=int, default=1, help='number of replicates for metaval and metatest')
parser.add_argument('--test_batch_size', type=int, default=4, help='number of examples per meta-test test batch')
parser.add_argument('--randomize', default=False, action='store_true', help='whether to randomize the train size during meta-train/-test')
parser.add_argument('--adjust_brightness', type=float, default=None, help='desired brightness (<1 darker, >1 brighter) on meta-test set')
parser.add_argument('--resize', type=int, default=None, help='desired image size to which to interpolate on meta-test set')
parser.add_argument('--steps', type=int, default=None, help='desired number of grad steps up to which to test on meta-test set (overrides n_steps)')
args = parser.parse_args()
args.pool = POOL_KEY.get(args.pool, None)
return args |
the-stack_0_22516 | """Demo example file that shows how to create a NixIO Glitter2 h5 file
from a video file and example coded channels.
"""
import nixio
from glitter2.storage.data_file import DataFile
from glitter2.player import GlitterPlayer
from os.path import join, dirname
from kivy_garden.painter import PaintCircle
import math
video_file = join(dirname(__file__), 'video.mp4')
data_file = join(dirname(__file__), 'video_data.h5')
# create nix data file
nix_file = nixio.File.open(
data_file, nixio.FileMode.Overwrite,
compression=nixio.Compression.DeflateNormal)
# create our data controller
data_file = DataFile(nix_file=nix_file)
# initialize the glitter data structures in the file
data_file.init_new_file()
# read all the frame timestamps and video metadata from the video file, this
# may take some time as all the frames are read
timestamps, video_metadata = GlitterPlayer.get_file_data(video_file)
width, height = video_metadata['src_vid_size']
# create an event channel
event = []
for i in range(len(timestamps)):
event.append(bool((i // 10) % 2))
event_metadata = {'name': 'An event'}
# create an pos channel with spiral data
angle = 20 * math.pi / len(timestamps)
pos = []
extent = 1 / 3 * min(width, height)
center_x = width / 2
center_y = height / 2
for i in range(len(timestamps)):
current_angle = i * angle
pos.append((
center_x + i / len(timestamps) * extent * math.cos(current_angle),
center_y + i / len(timestamps) * extent * math.sin(current_angle)
))
pos_metadata = {'name': 'A spiral'}
# create a circle zone channel
circle = PaintCircle.create_shape(
[center_x, center_y], 5 / 12 * min(width, height))
zone_metadata = {'name': 'A circle', 'shape_config': circle.get_state()}
# now set the file data
data_file.set_file_data(
video_file_metadata=video_metadata, saw_all_timestamps=True,
timestamps=[timestamps], event_channels=[(event_metadata, [event])],
pos_channels=[(pos_metadata, [pos])],
zone_channels=[zone_metadata])
# if you want to inspect the data file instance, load the data
data_file.open_file()
# finally close the nix file
nix_file.close()
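# Sketch of re-opening the file later for inspection (illustrative; `data_file_path`
# is assumed to point at the h5 file written above, and DataFile/open_file are the
# same APIs already used in this demo):
#   nix_file = nixio.File.open(data_file_path, nixio.FileMode.ReadOnly)
#   data_file = DataFile(nix_file=nix_file)
#   data_file.open_file()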
|
the-stack_0_22517 | # -*- coding: utf-8 -*-
################ Server Ver. 27 (2021. 2. 17.) #####################
import sys, os, ctypes
import asyncio, discord, aiohttp
import random, re, datetime, time, logging
from discord.ext import tasks, commands
from discord.ext.commands import CommandNotFound, MissingRequiredArgument
from gtts import gTTS
from github import Github
import base64
import gspread, boto3
from oauth2client.service_account import ServiceAccountCredentials # settlement (Google Sheets)
from io import StringIO
import urllib.request
from math import ceil, floor
##################### Logging ###########################
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.WARNING)
#ilsanglog = logging.getLogger('discord')
#ilsanglog.setLevel(level = logging.WARNING)
#handler = logging.StreamHandler()
#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
#ilsanglog.addHandler(handler)
#####################################################
if not discord.opus.is_loaded():
discord.opus.load_opus(ctypes.util.find_library('opus'))
print("opus_loaded")
basicSetting = []
bossData = []
fixed_bossData = []
bossNum = 0
fixed_bossNum = 0
chkvoicechannel = 0
chkrelogin = 0
chflg = 0
LoadChk = 0
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
channel_info = []
channel_name = []
channel_id = []
channel_voice_name = []
channel_voice_id = []
channel_type = []
FixedBossDateData = []
indexFixedBossname = []
endTime = None
gc = None
credentials = None
regenembed = None
command = None
kill_Data = None
kill_Time = None
item_Data = None
tmp_racing_unit = None
setting_channel_name = None
boss_nick = {}
access_token = os.environ["BOT_TOKEN"]
git_access_token = os.environ["GIT_TOKEN"]
git_access_repo = os.environ["GIT_REPO"]
git_access_repo_restart = os.environ["GIT_REPO_RESTART"]
try:
aws_key = os.environ["AWS_KEY"]
aws_secret_key = os.environ["AWS_SECRET_KEY"]
except:
aws_key = ""
aws_secret_key = ""
g = Github(git_access_token)
repo = g.get_repo(git_access_repo)
repo_restart = g.get_repo(git_access_repo_restart)
# Helper that extracts the initial consonants (chosung) from Hangul text
def convertToInitialLetters(text):
CHOSUNG_START_LETTER = 4352
JAMO_START_LETTER = 44032
JAMO_END_LETTER = 55203
JAMO_CYCLE = 588
def isHangul(ch):
return ord(ch) >= JAMO_START_LETTER and ord(ch) <= JAMO_END_LETTER
def isBlankOrNumber(ch):
return ord(ch) == 32 or ord(ch) >= 48 and ord(ch) <= 57
def convertNomalInitialLetter(ch):
dic_InitalLetter = {4352:"ㄱ"
,4353:"ㄲ"
,4354:"ㄴ"
,4355:"ㄷ"
,4356:"ㄸ"
,4357:"ㄹ"
,4358:"ㅁ"
,4359:"ㅂ"
,4360:"ㅃ"
,4361:"ㅅ"
,4362:"ㅆ"
,4363:"ㅇ"
,4364:"ㅈ"
,4365:"ㅉ"
,4366:"ㅊ"
,4367:"ㅋ"
,4368:"ㅌ"
,4369:"ㅍ"
,4370:"ㅎ"
,32:" "
,48:"0"
,49:"1"
,50:"2"
,51:"3"
,52:"4"
,53:"5"
,54:"6"
,55:"7"
,56:"8"
,57:"9"
}
return dic_InitalLetter[ord(ch)]
result = ""
for ch in text:
        if isHangul(ch): # characters that are not Hangul are filtered out
result += convertNomalInitialLetter(chr((int((ord(ch)-JAMO_START_LETTER)/JAMO_CYCLE))+CHOSUNG_START_LETTER))
elif isBlankOrNumber(ch):
result += convertNomalInitialLetter(chr(int(ord(ch))))
return result
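# Illustrative example: convertToInitialLetters("보스 123") -> "ㅂㅅ 123"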
def init():
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_voice_name
global channel_voice_id
global channel_id
global channel_type
global LoadChk
global indexFixedBossname
global FixedBossDateData
global endTime
    global gc # settlement (Google Sheets)
    global credentials # settlement (Google Sheets)
global regenembed
global command
global kill_Data
global kill_Time
global item_Data
global tmp_racing_unit
global boss_nick
command = []
tmp_bossData = []
tmp_fixed_bossData = []
FixedBossDateData = []
indexFixedBossname = []
kill_Data = {}
tmp_kill_Data = []
item_Data = {}
tmp_item_Data = []
f = []
fb = []
fk = []
fc = []
fi = []
tmp_racing_unit = []
boss_nick = {}
inidata = repo.get_contents("test_setting.ini")
file_data1 = base64.b64decode(inidata.content)
file_data1 = file_data1.decode('utf-8')
inputData = file_data1.split('\n')
command_inidata = repo.get_contents("command.ini")
file_data4 = base64.b64decode(command_inidata.content)
file_data4 = file_data4.decode('utf-8')
command_inputData = file_data4.split('\n')
boss_inidata = repo.get_contents("boss.ini")
file_data3 = base64.b64decode(boss_inidata.content)
file_data3 = file_data3.decode('utf-8')
boss_inputData = file_data3.split('\n')
fixed_inidata = repo.get_contents("fixed_boss.ini")
file_data2 = base64.b64decode(fixed_inidata.content)
file_data2 = file_data2.decode('utf-8')
fixed_inputData = file_data2.split('\n')
kill_inidata = repo.get_contents("kill_list.ini")
file_data5 = base64.b64decode(kill_inidata.content)
file_data5 = file_data5.decode('utf-8')
kill_inputData = file_data5.split('\n')
item_inidata = repo.get_contents("item_list.ini")
file_data6 = base64.b64decode(item_inidata.content)
file_data6 = file_data6.decode('utf-8')
item_inputData = file_data6.split('\n')
for i in range(len(fixed_inputData)):
FixedBossDateData.append(fixed_inputData[i])
index_fixed = 0
for value in FixedBossDateData:
if value.find('bossname') != -1:
indexFixedBossname.append(index_fixed)
index_fixed = index_fixed + 1
for i in range(inputData.count('\r')):
inputData.remove('\r')
for i in range(command_inputData.count('\r')):
command_inputData.remove('\r')
for i in range(boss_inputData.count('\r')):
boss_inputData.remove('\r')
for i in range(fixed_inputData.count('\r')):
fixed_inputData.remove('\r')
for i in range(kill_inputData.count('\r')):
kill_inputData.remove('\r')
for i in range(item_inputData.count('\r')):
item_inputData.remove('\r')
del(command_inputData[0])
del(boss_inputData[0])
del(fixed_inputData[0])
del(kill_inputData[0])
del(item_inputData[0])
for data in boss_inputData:
if "kakaoOnOff" in data:
raise Exception("[boss.ini] 파일에서 [kakaoOnOff]를 지워주세요.")
for data in fixed_inputData:
if "kakaoOnOff" in data:
raise Exception("[fixed_boss.ini] 파일에서 [kakaoOnOff]를 지워주세요.")
    ############## Boss-alert bot basic settings list #####################
try:
basicSetting.append(inputData[0][11:]) #basicSetting[0] : timezone
basicSetting.append(inputData[8][15:]) #basicSetting[1] : before_alert
basicSetting.append(inputData[10][11:]) #basicSetting[2] : mungChk1
basicSetting.append(inputData[9][16:]) #basicSetting[3] : before_alert1
        basicSetting.append(inputData[14][14:16]) #basicSetting[4] : restarttime hour
        basicSetting.append(inputData[14][17:]) #basicSetting[5] : restarttime minute
        basicSetting.append(inputData[1][15:]) #basicSetting[6] : voice channel ID
        basicSetting.append(inputData[2][14:]) #basicSetting[7] : text channel ID
        basicSetting.append(inputData[3][16:]) #basicSetting[8] : ladder (lottery) channel ID
        basicSetting.append(inputData[13][14:]) #basicSetting[9] : number of entries printed by !ㅂ
        basicSetting.append(inputData[17][11:]) #basicSetting[10] : json file name
        basicSetting.append(inputData[4][17:]) #basicSetting[11] : settlement channel ID
        basicSetting.append(inputData[16][12:]) #basicSetting[12] : sheet name
        basicSetting.append(inputData[15][16:]) #basicSetting[13] : restart interval
        basicSetting.append(inputData[18][12:]) #basicSetting[14] : sheet (tab) name
        basicSetting.append(inputData[19][12:]) #basicSetting[15] : input cell
        basicSetting.append(inputData[20][13:]) #basicSetting[16] : output cell
        basicSetting.append(inputData[12][13:]) #basicSetting[17] : mung delete count
        basicSetting.append(inputData[5][14:]) #basicSetting[18] : kill channel ID
        basicSetting.append(inputData[6][16:]) #basicSetting[19] : racing channel ID
        basicSetting.append(inputData[7][14:]) #basicSetting[20] : item channel ID
basicSetting.append(inputData[21][12:]) #basicSetting[21] : voice_use
basicSetting.append(inputData[11][11:]) #basicSetting[22] : mungChk2
except:
raise Exception("[test_setting.ini] 파일 양식을 확인하세요.")
    ############## Boss-alert bot command list #####################
for i in range(len(command_inputData)):
tmp_command = command_inputData[i][12:].rstrip('\r')
fc = tmp_command.split(', ')
command.append(fc)
fc = []
        #command.append(command_inputData[i][12:].rstrip('\r')) #command[0] ~ [24] : commands
    ################## Kill list ###########################
for i in range(len(kill_inputData)):
tmp_kill_Data.append(kill_inputData[i].rstrip('\r'))
fk.append(tmp_kill_Data[i][:tmp_kill_Data[i].find(' ')])
fk.append(tmp_kill_Data[i][tmp_kill_Data[i].find(' ')+1:])
try:
kill_Data[fk[0]] = int(fk[1])
except:
pass
fk = []
for i in range(len(item_inputData)):
tmp_item_Data.append(item_inputData[i].rstrip('\r'))
fi.append(tmp_item_Data[i][:tmp_item_Data[i].find(' ')])
fi.append(tmp_item_Data[i][tmp_item_Data[i].find(' ')+1:])
try:
item_Data[fi[0]] = int(fi[1])
except:
pass
fi = []
tmp_killtime = datetime.datetime.now().replace(hour=int(5), minute=int(0), second = int(0))
kill_Time = datetime.datetime.now()
if tmp_killtime < kill_Time :
kill_Time = tmp_killtime + datetime.timedelta(days=int(1))
else:
kill_Time = tmp_killtime
for i in range(len(basicSetting)):
basicSetting[i] = basicSetting[i].strip()
try:
if basicSetting[6] != "":
basicSetting[6] = int(basicSetting[6])
if basicSetting[7] != "":
basicSetting[7] = int(basicSetting[7])
if basicSetting[8] != "":
basicSetting[8] = int(basicSetting[8])
if basicSetting[11] != "":
basicSetting[11] = int(basicSetting[11])
if basicSetting[18] != "":
basicSetting[18] = int(basicSetting[18])
if basicSetting[19] != "":
basicSetting[19] = int(basicSetting[19])
if basicSetting[20] != "":
basicSetting[20] = int(basicSetting[20])
except ValueError:
raise Exception("[test_setting.ini] 파일 양식을 확인하세요.")
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if int(basicSetting[13]) == 0 :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
endTime = endTime + datetime.timedelta(days=int(1000))
else :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
if endTime < tmp_now :
endTime = endTime + datetime.timedelta(days=int(basicSetting[13]))
bossNum = int(len(boss_inputData)/6)
fixed_bossNum = int(len(fixed_inputData)/6)
for i in range(bossNum):
tmp_bossData.append(boss_inputData[i*6:i*6+6])
for i in range(fixed_bossNum):
tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
for j in range(bossNum):
for i in range(len(tmp_bossData[j])):
tmp_bossData[j][i] = tmp_bossData[j][i].strip()
for j in range(fixed_bossNum):
for i in range(len(tmp_fixed_bossData[j])):
tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
tmp_boss_name_list : list = []
tmp_nick : list = []
    ############## Normal boss info list #####################
for j in range(bossNum):
tmp_nick = []
tmp_len = tmp_bossData[j][1].find(':')
tmp_boss_name_list = tmp_bossData[j][0][11:].split(", ")
        f.append(tmp_boss_name_list[0]) #bossData[0] : boss name
if len(tmp_boss_name_list) > 1:
for nick in tmp_boss_name_list[1:]:
tmp_nick.append(nick)
tmp_nick.append(convertToInitialLetters(nick))
boss_nick[tmp_boss_name_list[0]] = tmp_nick
        f.append(tmp_bossData[j][1][10:tmp_len]) #bossData[1] : hour
        f.append(tmp_bossData[j][2][13:]) #bossData[2] : mung / no-entry flag
        f.append(tmp_bossData[j][3][20:]) #bossData[3] : minutes-before alert message
        f.append(tmp_bossData[j][4][13:]) #bossData[4] : spawn alert message
        f.append(tmp_bossData[j][1][tmp_len+1:]) #bossData[5] : minute
        f.append('') #bossData[6] : message
        f.append(tmp_bossData[j][5][11:]) #bossData[7] : mung-check time type
bossData.append(f)
f = []
bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
tmp_bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
bossTimeString.append('99:99:99')
bossDateString.append('9999-99-99')
tmp_bossTimeString.append('99:99:99')
tmp_bossDateString.append('9999-99-99')
bossFlag.append(False)
bossFlag0.append(False)
bossMungFlag.append(False)
bossMungCnt.append(0)
tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
    ############## Fixed boss info list #####################
for j in range(fixed_bossNum):
try:
tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
            fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : boss name
            fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : hour
            fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : minute
            fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : minutes-before alert message
            fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : spawn alert message
            fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : spawn cycle - hours
            fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : spawn cycle - minutes
            fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : start date - year
            fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : start date - month
            fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : start date - day
fixed_bossData.append(fb)
fb = []
fixed_bossFlag.append(False)
fixed_bossFlag0.append(False)
fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
if fixed_bossTime[j] < tmp_fixed_now :
while fixed_bossTime[j] < tmp_fixed_now :
fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
if tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])) <= fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[3])):
fixed_bossFlag0[j] = True
if fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])):
fixed_bossFlag[j] = True
fixed_bossFlag0[j] = True
except:
raise Exception(f"[fixed_boss.ini] 파일 {tmp_fixed_bossData[j][0][11:]} 부분 양식을 확인하세요.")
################# 이모지 로드 ######################
emo_inidata = repo.get_contents("emoji.ini")
emoji_data1 = base64.b64decode(emo_inidata.content)
emoji_data1 = emoji_data1.decode('utf-8')
emo_inputData = emoji_data1.split('\n')
for i in range(len(emo_inputData)):
tmp_emo = emo_inputData[i][8:].rstrip('\r')
if tmp_emo != "":
tmp_racing_unit.append(tmp_emo)
################# 리젠보스 시간 정렬 ######################
regenData = []
regenTime = []
regenbossName = []
outputTimeHour = []
outputTimeMin = []
for i in range(bossNum):
if bossData[i][2] == "1":
f.append(bossData[i][0] + "R")
else:
f.append(bossData[i][0])
f.append(bossData[i][1] + bossData[i][5])
regenData.append(f)
regenTime.append(bossData[i][1] + bossData[i][5])
f = []
regenTime = sorted(list(set(regenTime)))
for j in range(len(regenTime)):
for i in range(len(regenData)):
if regenTime[j] == regenData[i][1] :
f.append(regenData[i][0])
regenbossName.append(f)
try:
outputTimeHour.append(int(regenTime[j][:2]))
outputTimeMin.append(int(regenTime[j][2:]))
except ValueError:
raise Exception(f"[boss.ini] 파일 {f} gentime을 확인하시기 바랍니다.")
f = []
regenembed = discord.Embed(
title='----- 보스별 리스폰 시간 -----',
description= ' ')
for i in range(len(regenTime)):
if outputTimeMin[i] == 0 :
regenembed.add_field(name=str(outputTimeHour[i]) + '시간', value= '```'+ ', '.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
else :
regenembed.add_field(name=str(outputTimeHour[i]) + '시간' + str(outputTimeMin[i]) + '분', value= '```' + ','.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
regenembed.set_footer(text = 'R : 멍 보스')
##########################################################
if basicSetting[10] !="":
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'] #정산
credentials = ServiceAccountCredentials.from_json_keyfile_name(basicSetting[10], scope) #정산
init()
channel = ''
#음성(mp3) 파일 생성 함수 (AWS 키가 있으면 Polly 'Seoyeon' 음성, 없으면 gTTS 사용)
async def MakeSound(saveSTR, filename):
if aws_key != "" and aws_secret_key != "":
polly = boto3.client("polly", aws_access_key_id = aws_key, aws_secret_access_key = aws_secret_key, region_name = "eu-west-1")
s = '<speak><prosody rate="' + str(95) + '%">' + saveSTR + '</prosody></speak>'
response = polly.synthesize_speech(
TextType = "ssml",
Text=s,
OutputFormat="mp3",
VoiceId="Seoyeon")
stream = response.get("AudioStream")
with open(f"./{filename}.mp3", "wb") as mp3file:
data = stream.read()
mp3file.write(data)
else:
tts = gTTS(saveSTR, lang = 'ko')
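# 참고: gTTS 출력은 MP3 데이터이므로 .wav 확장자로 저장해도 실제 내용은 MP3이다.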
tts.save(f"./{filename}.wav")
#mp3 파일 재생함수
async def PlaySound(voiceclient, filename):
if basicSetting[21] != "1":
return
# source = discord.FFmpegPCMAudio(filename)
source = discord.FFmpegOpusAudio(filename)
try:
voiceclient.play(source)
except discord.errors.ClientException:
while voiceclient.is_playing():
await asyncio.sleep(1)
while voiceclient.is_playing():
await asyncio.sleep(1)
voiceclient.stop()
# source.cleanup()
return
#my_bot.db 저장하기
async def dbSave():
global bossData
global bossNum
global bossTime
global bossTimeString
global bossDateString
global bossMungFlag
global bossMungCnt
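# 보스탐이 초 단위까지 같은 보스가 있으면, 정렬/출력 시 겹치지 않도록 뒤 보스의 초를 1씩 증가시킨다.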
for i in range(bossNum):
for j in range(bossNum):
if bossTimeString[i] != '99:99:99' and bossTimeString[j] != '99:99:99':
if bossTimeString[i] == bossTimeString[j] and i != j:
tmp_time1 = bossTimeString[j][:6]
tmp_time2 = (int(bossTimeString[j][6:]) + 1)%100
if tmp_time2 < 10 :
tmp_time22 = '0' + str(tmp_time2)
elif tmp_time2 == 60 :
tmp_time22 = '00'
else :
tmp_time22 = str(tmp_time2)
bossTimeString[j] = tmp_time1 + tmp_time22
datelist1 = bossTime
datelist = list(set(datelist1))
information1 = '----- 보스탐 정보 -----\n'
for timestring in sorted(datelist):
for i in range(bossNum):
if timestring == bossTime[i]:
if bossTimeString[i] != '99:99:99' or bossMungFlag[i] == True :
if bossMungFlag[i] == True :
if bossData[i][2] == '0' :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + tmp_bossTime[i].strftime('%H:%M:%S') + ' @ ' + tmp_bossTime[i].strftime('%Y-%m-%d') + ' (미입력 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
else :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + tmp_bossTime[i].strftime('%H:%M:%S') + ' @ ' + tmp_bossTime[i].strftime('%Y-%m-%d') + ' (멍 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
else:
if bossData[i][2] == '0' :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (미입력 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
else :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (멍 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
try :
contents = repo.get_contents("my_bot.db")
repo.update_file(contents.path, "bossDB", information1, contents.sha)
except Exception as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now()
print (errortime)
pass
#my_bot.db 불러오기
async def dbLoad():
global LoadChk
contents1 = repo.get_contents("my_bot.db")
file_data = base64.b64decode(contents1.content)
file_data = file_data.decode('utf-8')
beforeBossData = file_data.split('\n')
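# my_bot.db 각 행 형식(dbSave 출력 기준): " - 보스명(시.분) : HH:MM:SS @ YYYY-MM-DD (멍/미입력 N회) * 메모"
# 아래에서는 '-', '(', ':', '@', '*' 문자의 위치를 기준으로 각 필드를 잘라 파싱한다.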
if len(beforeBossData) > 1:
for i in range(len(beforeBossData)-1):
for j in range(bossNum):
startPos = beforeBossData[i+1].find('-')
endPos = beforeBossData[i+1].find('(')
if beforeBossData[i+1][startPos+2:endPos] == bossData[j][0] :
#if beforeBossData[i+1].find(bossData[j][0]) != -1 :
tmp_mungcnt = 0
tmp_len = beforeBossData[i+1].find(':')
tmp_datelen = beforeBossData[i+1].find('@')
tmp_msglen = beforeBossData[i+1].find('*')
years1 = beforeBossData[i+1][tmp_datelen+2:tmp_datelen+6]
months1 = beforeBossData[i+1][tmp_datelen+7:tmp_datelen+9]
days1 = beforeBossData[i+1][tmp_datelen+10:tmp_datelen+12]
hours1 = beforeBossData[i+1][tmp_len+2:tmp_len+4]
minutes1 = beforeBossData[i+1][tmp_len+5:tmp_len+7]
seconds1 = beforeBossData[i+1][tmp_len+8:tmp_len+10]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(year = int(years1), month = int(months1), day = int(days1), hour=int(hours1), minute=int(minutes1), second = int(seconds1))
if bossData[j][7] == "1":
tmp_now_chk = tmp_now + datetime.timedelta(minutes = int(basicSetting[2]))
else:
tmp_now_chk = tmp_now + datetime.timedelta(minutes = int(basicSetting[22]))
if tmp_now_chk < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[j][1]), minutes = int(bossData[j][5]))
while tmp_now_chk < now2 :
tmp_now_chk = tmp_now_chk + deltaTime
tmp_now = tmp_now + deltaTime
tmp_mungcnt = tmp_mungcnt + 1
if tmp_now_chk > now2 > tmp_now: #젠중.
bossMungFlag[j] = True
tmp_bossTime[j] = tmp_now
tmp_bossTimeString[j] = tmp_bossTime[j].strftime('%H:%M:%S')
tmp_bossDateString[j] = tmp_bossTime[j].strftime('%Y-%m-%d')
bossTimeString[j] = '99:99:99'
bossDateString[j] = '9999-99-99'
bossTime[j] = tmp_bossTime[j] + datetime.timedelta(days=365)
else:
tmp_bossTime[j] = bossTime[j] = tmp_now
tmp_bossTimeString[j] = bossTimeString[j] = bossTime[j].strftime('%H:%M:%S')
tmp_bossDateString[j] = bossDateString[j] = bossTime[j].strftime('%Y-%m-%d')
if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[j] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[j] = True
if tmp_bossTime[j] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[j] = True
bossFlag0[j] = True
bossData[j][6] = beforeBossData[i+1][tmp_msglen+2:len(beforeBossData[i+1])]
if beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3] != 0 and beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] == ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
elif beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] != ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] + beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
else:
bossMungCnt[j] = 0
global FixedBossDateData
global fixed_bossFlag
global fixed_bossFlag0
global fixed_bossTime
global fixed_bossData
FixedBossDateData = []
fixed_bossFlag = []
fixed_bossFlag0 = []
fixed_bossTime = []
fixed_bossData = []
tmp_fixed_bossData = []
fb = []
fixed_inidata = repo.get_contents("fixed_boss.ini")
file_data2 = base64.b64decode(fixed_inidata.content)
file_data2 = file_data2.decode('utf-8')
fixed_inputData = file_data2.split('\n')
for i in range(len(fixed_inputData)):
FixedBossDateData.append(fixed_inputData[i])
del(fixed_inputData[0])
for i in range(fixed_inputData.count('\r')):
fixed_inputData.remove('\r')
fixed_bossNum = int(len(fixed_inputData)/6)
for i in range(fixed_bossNum):
tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
for j in range(fixed_bossNum):
for i in range(len(tmp_fixed_bossData[j])):
tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
############## 고정보스 정보 리스트 #####################
for j in range(fixed_bossNum):
try:
tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : 보스명
fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : 시
fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : 분
fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : 분전 알림멘트
fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : 젠 알림멘트
fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : 젠주기-시
fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : 젠주기-분
fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : 시작일-년
fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : 시작일-월
fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : 시작일-일
fixed_bossData.append(fb)
fb = []
fixed_bossFlag.append(False)
fixed_bossFlag0.append(False)
fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
if fixed_bossTime[j] < tmp_fixed_now :
while fixed_bossTime[j] < tmp_fixed_now :
fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
if tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])) <= fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[3])):
fixed_bossFlag0[j] = True
if fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])):
fixed_bossFlag[j] = True
fixed_bossFlag0[j] = True
except:
raise Exception(f"[fixed_boss.ini] 파일 {tmp_fixed_bossData[j][0]} 부분 양식을 확인하세요.")
LoadChk = 0
print ("<불러오기 완료>")
else:
LoadChk = 1
print ("보스타임 정보가 없습니다.")
#고정보스 날짜저장
async def FixedBossDateSave():
global fixed_bossData
global fixed_bossTime
global fixed_bossNum
global FixedBossDateData
global indexFixedBossname
for i in range(fixed_bossNum):
FixedBossDateData[indexFixedBossname[i] + 3] = 'startDate = '+ fixed_bossTime[i].strftime('%Y-%m-%d') + '\n'
FixedBossDateDataSTR = ""
for j in range(len(FixedBossDateData)):
pos = len(FixedBossDateData[j])
tmpSTR = FixedBossDateData[j][:pos-1] + '\r\n'
FixedBossDateDataSTR += tmpSTR
contents = repo.get_contents("fixed_boss.ini")
repo.update_file(contents.path, "bossDB", FixedBossDateDataSTR, contents.sha)
#사다리함수
async def LadderFunc(number, ladderlist, channelVal):
result_ladder = random.sample(ladderlist, number)
lose_member = [item for item in ladderlist if item not in result_ladder]
result_ladderSTR = ','.join(map(str, result_ladder))
embed = discord.Embed(title = "🎲 사다리! 묻고 더블로 가!",color=0x00ff00)
embed.add_field(name = "👥 참가자", value = f"```fix\n{', '.join(ladderlist)}```", inline=False)
embed.add_field(name = "😍 당첨", value = f"```fix\n{', '.join(result_ladder)}```")
embed.add_field(name = "😭 낙첨", value = f"```{', '.join(lose_member)}```")
await channelVal.send(embed=embed, tts=False)
#data초기화
async def init_data_list(filename, first_line : str = "-----------"):
try :
contents = repo.get_contents(filename)
repo.update_file(contents.path, "deleted list " + str(filename), first_line, contents.sha)
print ('< 데이터 초기화 >')
except Exception as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now()
print (errortime)
pass
#data저장
async def data_list_Save(filename, first_line : str = "-----------", save_data : dict = {}):
output_list = first_line+ '\n'
for key, value in save_data.items():
output_list += str(key) + ' ' + str(value) + '\n'
try :
contents = repo.get_contents(filename)
repo.update_file(contents.path, "updated " + str(filename), output_list, contents.sha)
except Exception as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now()
print (errortime)
pass
#서버(길드) 정보
async def get_guild_channel_info(bot):
text_channel_name : list = []
text_channel_id : list = []
voice_channel_name : list = []
voice_channel_id : list = []
for guild in bot.guilds:
for text_channel in guild.text_channels:
text_channel_name.append(text_channel.name)
text_channel_id.append(str(text_channel.id))
for voice_channel in guild.voice_channels:
voice_channel_name.append(voice_channel.name)
voice_channel_id.append(str(voice_channel.id))
return text_channel_name, text_channel_id, voice_channel_name, voice_channel_id
class taskCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.checker = True
self.main_task.start()
@tasks.loop(seconds=1.0, count=1)
async def main_task(self):
boss_task = asyncio.get_event_loop().create_task(self.boss_check())
await boss_task
@main_task.before_loop
async def before_tast(self):
await self.bot.wait_until_ready()
################ 명존쎄 ################
@commands.command(name=command[8][0], aliases=command[8][1:])
async def command_task_list(self, ctx : commands.Context):
if ctx.message.channel.id != basicSetting[7]:
return
for t in asyncio.Task.all_tasks():
# print(t._coro.__name__)
if t._coro.__name__ == f"boss_check":
if t.done():
try:
t.exception()
except asyncio.CancelledError:
continue
continue
t.cancel()
# await ctx.send( '< 보탐봇 명치 맞고 숨 고르기 중! 잠시만요! >', tts=False)
try:
file = discord.File("./명치.JPG")
await ctx.send(file = file)
except:
await ctx.send( '< 보탐봇 명치 맞고 숨 고르기 중! 잠시만요! >', tts=False)
print("명치!")
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
for vc in self.bot.voice_clients:
if vc.guild.id == int(ctx.guild.id):
if vc.is_playing():
vc.stop()
await vc.disconnect(force=True)
if basicSetting[21] != "1":
print("명치복구완료!")
await dbLoad()
await self.bot.get_channel(channel).send( '< 다시 왔습니다!(보이스 미사용) >', tts=False)
self.checker = True
boss_task = asyncio.Task(self.boss_check())
return
async def boss_check(self):
await self.bot.wait_until_ready()
global channel
global endTime
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global endTime
global kill_Time
if chflg == 1 :
if len(self.bot.voice_clients) == 0 :
if basicSetting[21] == "1":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
if self.bot.voice_clients[0].is_connected() :
await self.bot.get_channel(channel).send( '< 다시 왔습니다! >', tts=False)
self.checker = True
print("명치복구완료!")
except:
await self.bot.get_channel(channel).send( '< 음성채널 접속 에러! >', tts=False)
self.checker = False
print("명치복구실패!")
pass
await dbLoad()
while True:
############ 워닝잡자! ############
if log_stream.getvalue().find("Awaiting") != -1:
log_stream.truncate(0)
log_stream.seek(0)
await self.bot.get_channel(channel).send( '< 디코접속에러! 잠깐 나갔다 올께요! >', tts=False)
await dbSave()
break
log_stream.truncate(0)
log_stream.seek(0)
##################################
now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
priv0 = now+datetime.timedelta(minutes=int(basicSetting[3]))
priv = now+datetime.timedelta(minutes=int(basicSetting[1]))
tmp_aftr1 = now+datetime.timedelta(minutes=int(0-int(basicSetting[2])))
tmp_aftr2 = now+datetime.timedelta(minutes=int(0-int(basicSetting[22])))
if channel != '':
################ 보탐봇 재시작 ################
if endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S'):
await dbSave()
await FixedBossDateSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
print("보탐봇재시작!")
endTime = endTime + datetime.timedelta(days = int(basicSetting[13]))
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
await asyncio.sleep(2)
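# restart.txt 내용을 길이에 따라 번갈아 갱신한다. (커밋을 발생시켜 외부에서 봇을 재시작시키는 트리거로 추정)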
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
############# 음성접속! ###########
if len(self.bot.voice_clients) == 0 and self.checker and basicSetting[21] == "1":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 채널 자동 재접속완료!")
except discord.errors.ClientException as e:
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 자동 접속 부분에서 서버 음성 채널 이미 접속 에러 : {e}")
self.checker = False
pass
except Exception as e:
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 자동 접속 부분에서 서버 음성 채널 타임아웃 에러 : {e}")
self.checker = False
pass
if not self.bot.voice_clients or not self.bot.voice_clients[0].is_connected():
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 채널 자동 복구실패!")
await self.bot.get_channel(channel).send( '< 음성 채널 접속에 실패하였습니다. 잠시 후 음성 채널 접속을 시도해주세요! >')
self.checker = False
pass
################ 킬 목록 초기화 ################
if kill_Time.strftime('%Y-%m-%d ') + kill_Time.strftime('%H:%M') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M'):
kill_Time = kill_Time + datetime.timedelta(days=int(1))
await init_data_list('kill_list.ini', '-----척살명단-----')
################ 고정 보스 확인 ################
for i in range(fixed_bossNum):
if int(basicSetting[3]) == 0:
fixed_bossFlag0[i] = True
if int(basicSetting[1]) == 0:
fixed_bossFlag[i] = True
################ before_alert1 ################
if fixed_bossTime[i] <= priv0 and fixed_bossTime[i] > priv:
if basicSetting[3] != '0':
if fixed_bossFlag0[i] == False:
fixed_bossFlag0[i] = True
await self.bot.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '알림1.mp3')
except:
pass
################ before_alert ################
if fixed_bossTime[i] <= priv and fixed_bossTime[i] > now and fixed_bossFlag0[i] == True :
if basicSetting[1] != '0' :
if fixed_bossFlag[i] == False:
fixed_bossFlag[i] = True
await self.bot.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '알림.mp3')
except:
pass
################ 보스 젠 시간 확인 ################
if fixed_bossTime[i] <= now and fixed_bossFlag[i] == True and fixed_bossFlag0[i] == True :
fixed_bossTime[i] = fixed_bossTime[i]+datetime.timedelta(hours=int(fixed_bossData[i][5]), minutes=int(fixed_bossData[i][6]), seconds = int(0))
fixed_bossFlag0[i] = False
fixed_bossFlag[i] = False
embed = discord.Embed(
description= "```" + fixed_bossData[i][0] + fixed_bossData[i][4] + "```" ,
color=0x00ff00
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '젠.mp3')
except:
pass
################ 일반 보스 확인 ################
for i in range(bossNum):
if int(basicSetting[3]) == 0:
bossFlag0[i] = True
if int(basicSetting[1]) == 0:
bossFlag[i] = True
################ before_alert1 ################
if bossTime[i] <= priv0 and bossTime[i] > priv:
if basicSetting[3] != '0':
if bossFlag0[i] == False:
bossFlag0[i] = True
if bossData[i][6] != '' :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '알림1.mp3')
except:
pass
################ before_alert ################
if bossTime[i] <= priv and bossTime[i] > now and bossFlag0[i] == True:
if basicSetting[1] != '0' :
if bossFlag[i] == False:
bossFlag[i] = True
if bossData[i][6] != '' :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '알림.mp3')
except:
pass
################ 보스 젠 시간 확인 ################
if bossTime[i] <= now and bossFlag0[i] == True and bossFlag[i] == True :
#print ('if ', bossTime[i])
bossMungFlag[i] = True
tmp_bossTime[i] = bossTime[i]
tmp_bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
tmp_bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
bossTime[i] = now+datetime.timedelta(days=365)
if bossData[i][6] != '' :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + '\n<' + bossData[i][6] + '>```' ,
color=0x00ff00
)
else :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + "```" ,
color=0x00ff00
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '젠.mp3')
except:
pass
################ 보스 자동 멍 처리 ################
if bossMungFlag[i] == True:
if bossData[i][7] == "1":
aftr = tmp_aftr1
else:
aftr = tmp_aftr2
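# bossData[i][7](멍체크 시간 종류)에 따라 고른 대기시간(aftr)이 지나면 아래에서 자동 멍/미입력 처리한다.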
if (bossTime[i]+datetime.timedelta(days=-365)) <= aftr:
if basicSetting[2] != '0' and basicSetting[22] != '0' and bossFlag[i] == True and bossFlag0[i] == True and bossMungFlag[i] == True :
if int(basicSetting[17]) <= bossMungCnt[i] and int(basicSetting[17]) != 0:
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if bossData[i][2] == '0':
await self.bot.get_channel(channel).send(f'```자동 미입력 횟수 {basicSetting[17]}회 초과! [{bossData[i][0]}] 삭제!```', tts=False)
print ('자동미입력 횟수초과 <' + bossData[i][0] + ' 삭제완료>')
else:
await self.bot.get_channel(channel).send(f'```자동 멍처리 횟수 {basicSetting[17]}회 초과! [{bossData[i][0]}] 삭제!```', tts=False)
print ('자동멍처리 횟수초과 <' + bossData[i][0] + ' 삭제완료>')
#await dbSave()
else:
################ 미입력 보스 ################
if bossData[i][2] == '0':
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' 미입력 됐습니다.```', tts=False)
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '미입력.mp3')
except:
pass
################ 멍 보스 ################
else :
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' 멍 입니다.```')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '멍.mp3')
except:
pass
await asyncio.sleep(1) # loop runs every 1 second
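# 여기부터는 위 while 루프가 break로 종료된 경우(게이트웨이 경고 감지 등) 실행되는 정리 및 재시작 코드이다.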
self.checker = False
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
for t in asyncio.Task.all_tasks():
if t._coro.__name__ == f"boss_check":
print("-------------")
if t.done():
try:
t.exception()
except asyncio.CancelledError:
continue
continue
t.cancel()
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
boss_task = asyncio.Task(self.boss_check())
class mainCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
################ 보탐봇 입장 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[0][0], aliases=command[0][1:])
async def join_(self, ctx):
global basicSetting
global chflg
if basicSetting[7] == "":
channel = ctx.message.channel.id #메세지가 들어온 채널 ID
print ('[ ', basicSetting[7], ' ]')
print ('] ', ctx.message.channel.name, ' [')
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith("textchannel ="):
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = channel
#print ('======', inputData_text[i])
result_textCH = '\n'.join(inputData_textCH)
#print (result_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
await ctx.send(f"< 텍스트채널 [{ctx.message.channel.name}] 접속완료 >\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >", tts=False)
print('< 텍스트채널 [' + ctx.guild.get_channel(basicSetting[7]).name + '] 접속완료>')
if basicSetting[6] != "" and basicSetting[21] == "1":
try:
await ctx.guild.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
print('< 음성채널 [' + ctx.guild.get_channel(basicSetting[6]).name + '] 접속완료>')
except:
print('< 음성채널 [' + ctx.guild.get_channel(basicSetting[6]).name + '] 접속에러! >')
pass
if basicSetting[8] != "":
if str(basicSetting[8]) in channel_id:
print('< 사다리채널 [' + ctx.guild.get_channel(int(basicSetting[8])).name + '] 접속완료 >')
else:
basicSetting[8] = ""
print(f"사다리채널 ID 오류! [{command[28][0]} 사다리] 명령으로 재설정 바랍니다.")
if basicSetting[11] != "":
if str(basicSetting[11]) in channel_id:
print('< 정산채널 [' + ctx.guild.get_channel(int(basicSetting[11])).name + '] 접속완료>')
else:
basicSetting[11] = ""
print(f"정산채널 ID 오류! [{command[28][0]} 정산] 명령으로 재설정 바랍니다.")
if basicSetting[18] != "":
if str(basicSetting[18]) in channel_id:
print('< 척살채널 [' + ctx.guild.get_channel(int(basicSetting[18])).name + '] 접속완료>')
else:
basicSetting[18] = ""
print(f"척살채널 ID 오류! [{command[28][0]} 척살] 명령으로 재설정 바랍니다.")
if basicSetting[19] != "":
if str(basicSetting[19]) in channel_id:
print('< 경주채널 [' + ctx.guild.get_channel(int(basicSetting[19])).name + '] 접속완료>')
else:
basicSetting[19] = ""
print(f"경주채널 ID 오류! [{command[28][0]} 경주] 명령으로 재설정 바랍니다.")
if basicSetting[20] != "":
if str(basicSetting[20]) in channel_id:
print('< 아이템채널 [' + ctx.guild.get_channel(int(basicSetting[20])).name + '] 접속완료>')
else:
basicSetting[20] = ""
print(f"아이템채널 ID 오류! [{command[28][0]} 아이템] 명령으로 재설정 바랍니다.")
if int(basicSetting[13]) != 0 :
print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
else :
print('< 보탐봇 재시작 설정안됨 >')
chflg = 1
else:
curr_guild_info = None
for guild in self.bot.guilds:
for text_channel in guild.text_channels:
if basicSetting[7] == text_channel.id:
curr_guild_info = guild
emoji_list : list = ["⭕", "❌"]
guild_error_message = await ctx.send(f"이미 **[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널이 명령어 채널로 설정되어 있습니다.\n해당 채널로 명령어 채널을 변경 하시려면 ⭕ 그대로 사용하시려면 ❌ 를 눌러주세요.\n(10초이내 미입력시 기존 설정 그대로 설정됩니다.)", tts=False)
for emoji in emoji_list:
await guild_error_message.add_reaction(emoji)
def reaction_check(reaction, user):
return (reaction.message.id == guild_error_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list)
try:
reaction, user = await self.bot.wait_for('reaction_add', check = reaction_check, timeout = 10)
except asyncio.TimeoutError:
return await ctx.send(f"시간이 초과됐습니다. **[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널에서 사용해주세요!")
if str(reaction) == "⭕":
if ctx.voice_client is not None:
await ctx.voice_client.disconnect(force=True)
basicSetting[6] = ""
basicSetting[7] = int(ctx.message.channel.id)
print ('[ ', basicSetting[7], ' ]')
print ('] ', ctx.message.channel.name, ' [')
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith("textchannel ="):
inputData_textCH[i] = 'textchannel = ' + str(basicSetting[7]) + '\r'
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
return await ctx.send(f"명령어 채널이 **[{ctx.author.guild.name}]** 서버 **[{ctx.message.channel.name}]** 채널로 새로 설정되었습니다.\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >")
else:
return await ctx.send(f"명령어 채널 설정이 취소되었습니다.\n**[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널에서 사용해주세요!")
################ 보탐봇 메뉴 출력 ################
@commands.command(name=command[1][0], aliases=command[1][1:])
async def menu_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
command_list = ''
command_list += ','.join(command[2]) + '\n' #!설정확인
command_list += ','.join(command[3]) + '\n' #!채널확인
command_list += ','.join(command[4]) + ' [채널명]\n' #!채널이동
command_list += ','.join(command[5]) + ' ※ 관리자만 실행 가능\n' #!소환
command_list += ','.join(command[6]) + '\n' #!불러오기
command_list += ','.join(command[7]) + '\n' #!초기화
command_list += ','.join(command[8]) + '\n' #!명치
command_list += ','.join(command[9]) + '\n' #!재시작
command_list += ','.join(command[10]) + '\n' #!미예약
command_list += ','.join(command[11]) + ' [인원] [금액]\n' #!분배
command_list += ','.join(command[12]) + ' [뽑을인원수] [아이디1] [아이디2]...\n' #!사다리
command_list += ','.join(command[27]) + ' [아이디1] [아이디2]...(최대 12명)\n' #!경주
command_list += ','.join(command[41]) + ' [추첨인원] (대기시간/초) *(메모)\n' #!럭키박스
command_list += ','.join(command[35]) + ' [판매금액] (거래소세금)\n' #!수수료
command_list += ','.join(command[36]) + ' [거래소금액] [실거래금액] (거래소세금)\n' #!페이백
command_list += ','.join(command[13]) + ' [아이디]\n' #!정산
command_list += ','.join(command[14]) + ' 또는 ' + ','.join(command[14]) + ' 0000, 00:00\n' #!보스일괄
command_list += ','.join(command[40]) + ' 또는 ' + ','.join(command[40]) + ' 0000, 00:00\n' #!멍일괄
command_list += ','.join(command[15]) + '\n' #!q
command_list += ','.join(command[16]) + ' [할말]\n' #!v
command_list += ','.join(command[17]) + '\n' #!리젠
command_list += ','.join(command[18]) + '\n' #!현재시간
command_list += ','.join(command[24]) + '\n' #!킬초기화
command_list += ','.join(command[25]) + '\n' #!킬횟수 확인
command_list += ','.join(command[25]) + ' [아이디]\n' #!킬
command_list += ','.join(command[26]) + ' [아이디]\n' #!킬삭제
command_list += ','.join(command[33]) + ' [아이디] 또는 ' + ','.join(command[33]) + ' [아이디] [횟수]\n' #!킬차감
command_list += ','.join(command[29]) + '\n' #!아이템 목록 초기화
command_list += ','.join(command[30]) + '\n' #!아이템 목록 확인
command_list += ','.join(command[30]) + ' [아이템] 또는 ' + ','.join(command[30]) + ' [아이템] [개수]\n' #!아이템 목록 입력
command_list += ','.join(command[31]) + ' [아이템]\n' #!아이템 목록에서 삭제
command_list += ','.join(command[32]) + ' [아이템] 또는 ' + ','.join(command[32]) + ' [아이템] [개수]\n' #!아이템 차감
command_list += ','.join(command[19]) + '\n' #!공지
command_list += ','.join(command[19]) + ' [공지내용]\n' #!공지
command_list += ','.join(command[20]) + '\n' #!공지삭제
command_list += ','.join(command[21]) + ' [할말]\n' #!상태
command_list += ','.join(command[28]) + ' 사다리, 정산, 척살, 경주, 아이템\n' #!채널설정
command_list += ','.join(command[42]) + ' 사다리, 정산, 척살, 경주, 아이템\n' #!채널삭제
command_list += ','.join(command[34]) + ' ※ 관리자만 실행 가능\n\n' #서버나가기
command_list += ','.join(command[22]) + '\n' #보스탐
command_list += ','.join(command[23]) + '\n' #!보스탐
command_list += '[보스명]컷 또는 [보스명]컷 0000, 00:00\n'
command_list += '[보스명] 컷 또는 [보스명] 컷 0000, 00:00\n'
command_list += '[보스명]멍 또는 [보스명]멍 0000, 00:00\n'
command_list += '[보스명]예상 또는 [보스명]예상 0000, 00:00\n'
command_list += '[보스명]삭제\n'
command_list += '[보스명]메모 [할말]\n'
embed = discord.Embed(
title = "----- 명령어 -----",
description= '```' + command_list + '```',
color=0xff00ff
)
embed.add_field(
name="----- 추가기능 -----",
value= '```- [보스명]컷/멍/예상 [할말] : 보스시간 입력 후 빈칸 두번!! 메모 가능\n- [보스명]컷 명령어는 초성으로 입력가능합니다.\n ex)' + bossData[0][0] + '컷 => ' + convertToInitialLetters(bossData[0][0] +'컷') + ', ' + bossData[0][0] + ' 컷 => ' + convertToInitialLetters(bossData[0][0] +' 컷') + '```'
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 보탐봇 기본 설정확인 ################
@commands.command(name=command[2][0], aliases=command[2][1:])
async def setting_(self, ctx):
#print (ctx.message.channel.id)
if ctx.message.channel.id == basicSetting[7]:
setting_val = '보탐봇버전 : Server Ver. 27 (2021. 2. 17.)\n'
if basicSetting[6] != "" :
setting_val += '음성채널 : ' + self.bot.get_channel(basicSetting[6]).name + '\n'
setting_val += '텍스트채널 : ' + self.bot.get_channel(basicSetting[7]).name +'\n'
if basicSetting[8] != "" :
setting_val += '사다리채널 : ' + self.bot.get_channel(int(basicSetting[8])).name + '\n'
if basicSetting[11] != "" :
setting_val += '정산채널 : ' + self.bot.get_channel(int(basicSetting[11])).name + '\n'
if basicSetting[18] != "" :
setting_val += '척살채널 : ' + self.bot.get_channel(int(basicSetting[18])).name + '\n'
if basicSetting[19] != "" :
setting_val += '경주채널 : ' + self.bot.get_channel(int(basicSetting[19])).name + '\n'
if basicSetting[20] != "" :
setting_val += '아이템채널 : ' + self.bot.get_channel(int(basicSetting[20])).name + '\n'
setting_val += '보스젠알림시간1 : ' + basicSetting[1] + ' 분 전\n'
setting_val += '보스젠알림시간2 : ' + basicSetting[3] + ' 분 전\n'
setting_val += '보스멍확인시간1 : ' + basicSetting[2] + ' 분 후\n'
setting_val += '보스멍확인시간2 : ' + basicSetting[22] + ' 분 후\n'
if basicSetting[21] == "0":
setting_val += '보이스사용여부 : 사용안함\n'
else:
setting_val += '보이스사용여부 : 사용중\n'
embed = discord.Embed(
title = "----- 설정내용 -----",
description= f'```{setting_val}```',
color=0xff00ff
)
embed.add_field(
name="----- Special Thanks to. -----",
value= '```총무, 옹님, 공부중, 꽃신, 별빛, 크마, D.H.Kim, K.H.Sim, 쿠쿠, 오브로드, D.H.Oh, Bit, 팥빵, 천려, 이파리, 도미, 일깡, B.Park```'
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 서버 채널 확인 ################
@commands.command(name=command[3][0], aliases=command[3][1:])
async def chChk_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
channel_name, channel_id, channel_voice_name, channel_voice_id = await get_guild_channel_info(self.bot)
ch_information = []
cnt = 0
ch_information.append("")
ch_voice_information = []
cntV = 0
ch_voice_information.append("")
for guild in self.bot.guilds:
ch_information[cnt] = f"{ch_information[cnt]}👑 {guild.name} 👑\n"
for i in range(len(channel_name)):
for text_channel in guild.text_channels:
if channel_id[i] == str(text_channel.id):
if len(ch_information[cnt]) > 900 :
ch_information.append("")
cnt += 1
ch_information[cnt] = f"{ch_information[cnt]}[{channel_id[i]}] {channel_name[i]}\n"
ch_voice_information[cntV] = f"{ch_voice_information[cntV]}👑 {guild.name} 👑\n"
for i in range(len(channel_voice_name)):
for voice_channel in guild.voice_channels:
if channel_voice_id[i] == str(voice_channel.id):
if len(ch_voice_information[cntV]) > 900 :
ch_voice_information.append("")
cntV += 1
ch_voice_information[cntV] = f"{ch_voice_information[cntV]}[{channel_voice_id[i]}] {channel_voice_name[i]}\n"
######################
if len(ch_information) == 1 and len(ch_voice_information) == 1:
embed = discord.Embed(
title = "----- 채널 정보 -----",
description = '',
color=0xff00ff
)
embed.add_field(
name="< 택스트 채널 >",
value= '```' + ch_information[0] + '```',
inline = False
)
embed.add_field(
name="< 보이스 채널 >",
value= '```' + ch_voice_information[0] + '```',
inline = False
)
await ctx.send( embed=embed, tts=False)
else :
embed = discord.Embed(
title = "----- 채널 정보 -----\n< 택스트 채널 >",
description= '```' + ch_information[0] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(ch_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_information[i+1] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
embed = discord.Embed(
title = "< 음성 채널 >",
description= '```' + ch_voice_information[0] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(ch_voice_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_voice_information[i+1] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 텍스트채널이동 ################
@commands.command(name=command[4][0], aliases=command[4][1:])
async def chMove_(self, ctx):
global basicSetting
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = None
for i in range(len(channel_name)):
if channel_name[i] == msg:
channel = int(channel_id[i])
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('textchannel ='):
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = int(channel)
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
await ctx.send( f"명령어 채널이 < {ctx.message.channel.name} >에서 < {self.bot.get_channel(channel).name} > 로 이동되었습니다.", tts=False)
await self.bot.get_channel(channel).send( f"< {self.bot.get_channel(channel).name} 이동완료 >", tts=False)
else:
return
################ 보탐봇 음성채널 소환 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[5][0], aliases=command[5][1:])
async def connectVoice_(self, ctx):
global basicSetting
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
if ctx.voice_client is None:
if ctx.author.voice:
try:
await ctx.author.voice.channel.connect(reconnect=True, timeout=5)
except:
await ctx.send('음성채널에 접속에 실패하였습니다.', tts=False)
pass
else:
await ctx.send('음성채널에 먼저 들어가주세요.', tts=False)
return
else:
if ctx.voice_client.is_playing():
ctx.voice_client.stop()
await ctx.voice_client.move_to(ctx.author.voice.channel)
voice_channel = ctx.author.voice.channel
print ('< ', basicSetting[6], ' >')
print ('> ', self.bot.get_channel(voice_channel.id).name, ' <')
if basicSetting[6] == "":
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i].startswith('voicechannel ='):
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
elif basicSetting[6] != int(voice_channel.id):
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i].startswith('voicechannel ='):
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
await ctx.send('< 음성채널 [' + self.bot.get_channel(voice_channel.id).name + '] 접속완료>', tts=False)
else:
return
################ my_bot.db에 저장된 보스타임 불러오기 ################
@commands.command(name=command[6][0], aliases=command[6][1:])
async def loadDB_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
await dbLoad()
if LoadChk == 0:
await ctx.send('<불러오기 완료>', tts=False)
else:
await ctx.send('<보스타임 정보가 없습니다.>', tts=False)
else:
return
################ 저장된 정보 초기화 ################
@commands.command(name=command[7][0], aliases=command[7][1:])
async def initVal_(self, ctx):
global basicSetting
global bossData
global fixed_bossData
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global FixedBossDateData
global indexFixedBossname
if ctx.message.channel.id == basicSetting[7]:
basicSetting = []
bossData = []
fixed_bossData = []
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
FixedBossDateData = []
indexFixedBossname = []
init()
await dbSave()
await ctx.send('< 초기화 완료 >', tts=False)
print ("< 초기화 완료 >")
else:
return
################ 보탐봇 재시작 ################
@commands.command(name=command[9][0], aliases=command[9][1:])
async def restart_(self, ctx):
global basicSetting
global bossTimeString
global bossDateString
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[2] != '0' and basicSetting[22] != '0':
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
print("보탐봇강제재시작!")
await asyncio.sleep(2)
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
else:
return
################ 미예약 보스타임 출력 ################
@commands.command(name=command[10][0], aliases=command[10][1:])
async def nocheckBoss_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
tmp_boss_information = []
tmp_cnt = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1800 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
if len(tmp_boss_information) == 1:
if len(tmp_boss_information[0]) != 0:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
else:
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 분배 결과 출력 ################
@commands.command(name=command[11][0], aliases=command[11][1:])
async def bunbae_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
separate_money = []
separate_money = msg.split(" ")
num_sep = floor(int(separate_money[0]))
cal_tax1 = floor(float(separate_money[1])*0.05)
real_money = floor(floor(float(separate_money[1])) - cal_tax1)
cal_tax2 = floor(real_money/num_sep) - floor(float(floor(real_money/num_sep))*0.95)
if num_sep == 0 :
await ctx.send('```분배 인원이 0입니다. 재입력 해주세요.```', tts=False)
else :
embed = discord.Embed(
title = "----- 분배결과! -----",
description= '```1차 세금 : ' + str(cal_tax1) + '\n1차 수령액 : ' + str(real_money) + '\n분배자 거래소등록금액 : ' + str(floor(real_money/num_sep)) + '\n2차 세금 : ' + str(cal_tax2) + '\n인당 실수령액 : ' + str(floor(float(floor(real_money/num_sep))*0.95)) + '```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 사다리 결과 출력 ################
@commands.command(name=command[12][0], aliases=command[12][1:])
async def ladder_(self, ctx : commands.Context, *, args : str = None):
if basicSetting[8] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[8]:
if not args:
return await ctx.send(f'```명령어 [인원] [아이디1] [아이디2] ... 형태로 입력해주시기 바랍니다.```')
ladder = args.split()
try:
num_cong = int(ladder[0]) # 뽑을 인원
del(ladder[0])
except ValueError:
return await ctx.send(f'```뽑을 인원은 숫자로 입력바랍니다\nex)!사다리 1 가 나 다 ...```')
if num_cong >= len(ladder):
return await ctx.send(f'```추첨인원이 총 인원과 같거나 많습니다. 재입력 해주세요```')
if len(ladder) > 20:
await LadderFunc(num_cong, ladder, ctx)
return
input_dict : dict = {}
ladder_description : list = []
ladder_data : list = []
output_list : list = []
result :dict = {}
for i in range(len(ladder)):
input_dict[f"{i+1}"] = ladder[i]
if i < num_cong:
output_list.append("o")
else:
output_list.append("x")
for i in range(len(ladder)+1):
tmp_list = []
if i%2 != 0:
sample_list = ["| |-", "| | "]
else:
sample_list = ["| | ", "|-| "]
for _ in range(len(ladder)//2):
value = random.choice(sample_list)
tmp_list.append(value)
ladder_description.append(tmp_list)
tmp_result = list(input_dict.keys())
input_data : str = ""
for i in range(len(tmp_result)):
if int(tmp_result[i]) < 9:
input_data += f"{tmp_result[i]} "
else:
input_data += f"{tmp_result[i]}"
input_value_data = " ".join(list(input_dict.values()))
for i in range(len(ladder_description)):
if (len(ladder) % 2) != 0:
ladder_data.append(f"{''.join(ladder_description[i])}|\n")
else:
ladder_data.append(f"{''.join(ladder_description[i])[:-1]}\n")
random.shuffle(output_list)
output_data = list(" ".join(output_list))
for line in reversed(ladder_data):
for i, x in enumerate(line):
if i % 2 == 1 and x == '-':
output_data[i-1], output_data[i+1] = output_data[i+1], output_data[i-1]
for i in range(output_data.count(" ")):
output_data.remove(" ")
for i in range(len(tmp_result)):
result[tmp_result[i]] = output_data[i]
result_str : str = ""
join_member : list = []
win_member : list = []
lose_member : list = []
for x, y in result.items():
join_member.append(f"{x}:{input_dict[f'{x}']}")
if y == "o":
win_member.append(f"{input_dict[f'{x}']}")
else :
lose_member.append(f"{input_dict[f'{x}']}")
embed = discord.Embed(title = "🎲 사다리! 묻고 더블로 가!",
color=0x00ff00
)
embed.description = f"||```{input_data}\n{''.join(ladder_data)}{' '.join(output_list)}```||"
embed.add_field(name = "👥 참가자", value = f"```fix\n{', '.join(join_member)}```", inline=False)
embed.add_field(name = "😍 당첨", value = f"```fix\n{', '.join(win_member)}```")
embed.add_field(name = "😭 낙첨", value = f"```{', '.join(lose_member)}```")
return await ctx.send(embed = embed)
else:
return
################ 정산확인 ################
@commands.command(name=command[13][0], aliases=command[13][1:])
async def jungsan_(self, ctx):
if basicSetting[11] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[11]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
if basicSetting[10] !="" and basicSetting[12] !="" and basicSetting[14] !="" and basicSetting[15] !="" and basicSetting[16] !="" :
SearchID = msg
gc = gspread.authorize(credentials)
wks = gc.open(basicSetting[12]).worksheet(basicSetting[14])
wks.update_acell(basicSetting[15], SearchID)
result = wks.acell(basicSetting[16]).value
embed = discord.Embed(
description= '```' + SearchID + ' 님이 받을 다이야는 ' + result + ' 다이야 입니다.```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 보스타임 일괄 설정 ################
@commands.command(name=command[14][0], aliases=command[14][1:])
async def allBossInput_(self, ctx):
global basicSetting
global bossData
global fixed_bossData
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
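# 아직 예약되지 않은(99:99:99) 보스에 대해, 입력된 컷 시각(없으면 현재 시각)을 기준으로 젠 주기를 더해 다음 젠 시간을 일괄 계산한다.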
for i in range(bossNum):
if bossTimeString[i] == '99:99:99':
tmp_msg = msg
if len(tmp_msg) > 3 :
if tmp_msg.find(':') != -1 :
chkpos = tmp_msg.find(':')
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(tmp_msg)-2
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 1
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await dbSave()
await dbLoad()
await dbSave()
await ctx.send('<보스 일괄 입력 완료>', tts=False)
print ("<보스 일괄 입력 완료>")
else:
return
################ 멍보스타임 일괄 설정 ################
@commands.command(name=command[40][0], aliases=command[40][1:])
async def mungBossInput_(self, ctx):
global basicSetting
global bossData
global fixed_bossData
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
for i in range(bossNum):
if bossData[i][2] == "1" and bossTimeString[i] == '99:99:99':
tmp_msg = msg
if len(tmp_msg) > 3 :
if tmp_msg.find(':') != -1 :
chkpos = tmp_msg.find(':')
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(tmp_msg)-2
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 1
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await dbSave()
await dbLoad()
await dbSave()
await ctx.send('<멍보스 일괄 입력 완료>', tts=False)
print ("<멍보스 일괄 입력 완료>")
else:
return
################ 가장 근접한 보스타임 출력 ################
@commands.command(name=command[15][0], aliases=command[15][1:])
async def nearTimeBoss_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
checkTime = datetime.datetime.now() + datetime.timedelta(days=1, hours = int(basicSetting[0]))
datelist = []
datelist2 = []
ouput_bossData = []
aa = []
sorted_datelist = []
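            # Collect spawn times of scheduled (non-멍-state) bosses plus fixed bosses due within the next
            # 3 hours, sort them, and report up to basicSetting[9] upcoming spawns with the time remaining.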
for i in range(bossNum):
if bossMungFlag[i] != True and bossTimeString[i] != '99:99:99' :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossMungFlag[i] != True :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
aa.append(bossTime[i]) #output_bossData[1] : 시간
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
tmp_sorted_datelist = sorted(datelist)
for i in range(len(tmp_sorted_datelist)):
if checkTime > tmp_sorted_datelist[i]:
sorted_datelist.append(tmp_sorted_datelist[i])
if len(sorted_datelist) == 0:
await ctx.send( '<보스타임 정보가 없습니다.>', tts=False)
else :
result_lefttime = ''
if len(sorted_datelist) > int(basicSetting[9]):
for j in range(int(basicSetting[9])):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
else :
for j in range(len(sorted_datelist)):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
embed = discord.Embed(
description= result_lefttime,
color=0xff0000
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 음성파일 생성 후 재생 ################
@commands.command(name=command[16][0], aliases=command[16][1:])
async def playText_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
msg = ctx.message.content[len(ctx.invoked_with)+1:]
sayMessage = msg
try:
await MakeSound(ctx.message.author.display_name +'님이, ' + sayMessage, './sound/say')
except:
await ctx.send( f"```음성파일 생성에 실패하였습니다.!(amazon polly 사용시 키 값을 확인하세요!)```")
return
await ctx.send("```< " + ctx.author.display_name + " >님이 \"" + sayMessage + "\"```", tts=False)
try:
if aws_key != "" and aws_secret_key != "":
await PlaySound(ctx.voice_client, './sound/say.mp3')
else:
await PlaySound(ctx.voice_client, './sound/say.wav')
except:
await ctx.send( f"```음성파일 재생에 실패하였습니다. 접속에 문제가 있거나 음성채널에 접속 되지 않은 상태입니다.!```")
return
else:
return
################ 리젠시간 출력 ################
@commands.command(name=command[17][0], aliases=command[17][1:])
async def regenTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
await ctx.send(embed=regenembed, tts=False)
else:
return
################ 현재시간 확인 ################
@commands.command(name=command[18][0], aliases=command[18][1:])
async def currentTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
            currentTime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
embed = discord.Embed(
                title = '현재시간은 ' + currentTime.strftime('%H') + '시 ' + currentTime.strftime('%M') + '분 ' + currentTime.strftime('%S')+ '초 입니다.',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 공지 등록/확인 ################
@commands.command(name=command[19][0], aliases=command[19][1:])
async def notice_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content.split(" ")
if len(msg) > 1:
sayMessage = " ".join(msg[1:])
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 등록", sayMessage, contents.sha)
await ctx.send( '< 공지 등록완료 >', tts=False)
else:
notice_initdata = repo.get_contents("notice.ini")
notice = base64.b64decode(notice_initdata.content)
notice = notice.decode('utf-8')
if notice != '' :
embed = discord.Embed(
description= str(notice),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '```등록된 공지가 없습니다.```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 공지 삭제 ################
@commands.command(name=command[20][0], aliases=command[20][1:])
async def noticeDel_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 삭제", '', contents.sha)
await ctx.send( '< 공지 삭제완료 >', tts=False)
else:
return
################ 봇 상태메세지 변경 ################
@commands.command(name=command[21][0], aliases=command[21][1:])
async def botStatus_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
sayMessage = msg
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=sayMessage, type=1), afk = False)
await ctx.send( '< 상태메세지 변경완료 >', tts=False)
else:
return
################ 보스타임 출력 ################
@commands.command(name=command[22][0], aliases=command[22][1:])
async def bossTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
datelist = []
datelist2 = []
ouput_bossData = []
aa = []
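            # Build one row per scheduled boss: [name, datetime, display string, '+'/'-' marker,
            # 멍-capable flag, miss count, memo]. '+' marks a confirmed time, '-' an estimate
            # carried over from a missed (멍) spawn, '@' a fixed-spawn boss.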
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
tmp_boss_information = []
tmp_cnt = 0
tmp_time_delta = 0
tmp_boss_information.append('')
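            # Bosses with no recorded time are gathered into a comma-separated "미예약" list,
            # split into chunks so each embed stays within Discord's size limits.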
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1000 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
else :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : 시간
tmp_time_delta = (tmp_bossTime[i].date() - (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).date()).days
if tmp_time_delta == 0:
aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
else:
if tmp_time_delta > 0:
aa.append(f"(+{tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
else:
aa.append(f"({tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
tmp_time_delta = 0
# aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(tmp_bossTime[i].strftime('%H:%M'))
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : 시간
                        tmp_time_delta = (bossTime[i].date() - (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).date()).days
                        if tmp_time_delta == 0:
                            aa.append(bossTime[i].strftime('%H:%M:%S'))
                        else:
                            if tmp_time_delta > 0:
                                aa.append(f"(+{tmp_time_delta}d) {bossTime[i].strftime('%H:%M:%S')}")
                            else:
                                aa.append(f"({tmp_time_delta}d) {bossTime[i].strftime('%H:%M:%S')}")
tmp_time_delta = 0
# aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(bossTime[i].strftime('%H:%M'))
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : 멍/미입력 보스
aa.append(bossMungCnt[i]) #output_bossData[5] : 멍/미입력횟수
aa.append(bossData[i][6]) #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(fixed_bossTime[i].strftime('%H:%M'))
aa.append('@') #output_bossData[3] : @
aa.append(0) #output_bossData[4] : 멍/미입력 보스
aa.append(0) #output_bossData[5] : 멍/미입력횟수
aa.append("") #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
boss_information = []
cnt = 0
boss_information.append('')
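            # Render the rows in chronological order, appending the miss count when it is non-zero
            # ("멍 n회" for 멍-capable bosses, "미 n회" otherwise).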
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
if len(boss_information) == 1 and len(tmp_boss_information) == 1:
###########################
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
if len(tmp_boss_information[0]) != 0:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
embed.add_field(
name="----- 미예약 보스 -----",
value= tmp_boss_information[0],
inline = False
)
await ctx.send( embed=embed, tts=False)
else :
###########################일반보스출력
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
###########################미예약보스출력
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
else:
return
################ 보스타임 출력(고정보스포함) ################
@commands.command(name=command[23][0], aliases=command[23][1:])
async def bossTime_fixed_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
datelist = []
datelist2 = []
ouput_bossData = []
aa = []
fixed_datelist = []
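            # Same report as the plain boss-time command, but fixed-spawn bosses get their own
            # chronological section and a date prefix is shown whenever a spawn falls on another day.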
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
datelist = list(set(datelist2))
tmp_boss_information = []
tmp_cnt = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1800 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
else :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : 시간
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == tmp_bossTime[i].strftime('%Y-%m-%d'):
aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
else:
aa.append(f"[{tmp_bossTime[i].strftime('%Y-%m-%d')}] {tmp_bossTime[i].strftime('%H:%M:%S')}")
# aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(tmp_bossTime[i].strftime('%H:%M'))
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : 시간
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == bossTime[i].strftime('%Y-%m-%d'):
aa.append(bossTime[i].strftime('%H:%M:%S'))
else:
aa.append(f"[{bossTime[i].strftime('%Y-%m-%d')}] {bossTime[i].strftime('%H:%M:%S')}")
# aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(bossTime[i].strftime('%H:%M'))
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : 멍/미입력 보스
aa.append(bossMungCnt[i]) #output_bossData[5] : 멍/미입력횟수
aa.append(bossData[i][6]) #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
fixed_datelist.append(fixed_bossTime[i])
fixed_datelist = list(set(fixed_datelist))
fixedboss_information = []
cntF = 0
fixedboss_information.append('')
for timestring1 in sorted(fixed_datelist):
if len(fixedboss_information[cntF]) > 1800 :
fixedboss_information.append('')
cntF += 1
for i in range(fixed_bossNum):
if timestring1 == fixed_bossTime[i]:
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == fixed_bossTime[i].strftime('%Y-%m-%d'):
tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M:%S') #초빼기 : tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M')
else:
tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M:%S') #초빼기 : tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M')
fixedboss_information[cntF] = fixedboss_information[cntF] + tmp_timeSTR + ' : ' + fixed_bossData[i][0] + '\n'
boss_information = []
cnt = 0
boss_information.append('')
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
###########################고정보스출력
if len(fixedboss_information[0]) != 0:
fixedboss_information[0] = "```diff\n" + fixedboss_information[0] + "\n```"
else :
fixedboss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 고 정 보 스 -----",
description= fixedboss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(fixedboss_information)-1):
if len(fixedboss_information[i+1]) != 0:
fixedboss_information[i+1] = "```diff\n" + fixedboss_information[i+1] + "\n```"
else :
fixedboss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= fixedboss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
###########################일반보스출력
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
###########################미예약보스출력
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
else:
return
################ 킬초기화 ################
@commands.command(name=command[24][0], aliases=command[24][1:])
async def killInit_(self, ctx):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
kill_Data = {}
await init_data_list('kill_list.ini', '-----척살명단-----')
return await ctx.send( '< 킬 목록 초기화완료 >', tts=False)
else:
return
################ 킬명단 확인 및 추가################
@commands.command(name=command[25][0], aliases=command[25][1:])
async def killList_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
kill_output = ''
for key, value in kill_Data.items():
kill_output += ':skull_crossbones: ' + str(key) + ' : ' + str(value) + '번 따히!\n'
if kill_output != '' :
embed = discord.Embed(
description= str(kill_output),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '등록된 킬 목록이 없습니다. 분발하세요!',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
if args in kill_Data:
kill_Data[args] += 1
else:
kill_Data[args] = 1
embed = discord.Embed(
description= ':skull_crossbones: ' + args + ' 따히! [' + str(kill_Data[args]) + '번]\n',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
else:
return
################ 킬삭제 ################
@commands.command(name=command[26][0], aliases=command[26][1:])
async def killDel_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
return await ctx.send( '```제대로 된 아이디를 입력해주세요!\n```', tts=False)
if args in kill_Data:
del kill_Data[args]
return await ctx.send( ':angel: ' + args + ' 삭제완료!', tts=False)
else :
return await ctx.send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
else:
return
################ 킬 차감 ################
@commands.command(name=command[33][0], aliases=command[33][1:])
async def killSubtract_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
return await ctx.send(f'{command[33][0]} [아이디] 혹은 {command[33][0]} [아이디] [횟수] 양식에 맞춰 입력해주세요!', tts = False)
input_data = args.split()
if len(input_data) == 1:
kill_name = args
count = 1
elif len(input_data) == 2:
kill_name = input_data[0]
try:
count = int(input_data[1])
except ValueError:
return await ctx.send(f'[횟수]는 숫자로 입력바랍니다')
else:
return await ctx.send(f'{command[33][0]} [아이디] 혹은 {command[33][0]} [아이디] [횟수] 양식에 맞춰 입력해주세요!', tts = False)
if kill_name in kill_Data:
if kill_Data[kill_name] < int(count):
return await ctx.send( f"등록된 킬 횟수[{str(kill_Data[kill_name])}번]보다 차감 횟수[{str(count)}번]가 많습니다. 킬 횟수에 맞게 재입력 바랍니다.", tts=False)
else:
kill_Data[kill_name] -= int(count)
else:
return await ctx.send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
embed = discord.Embed(
description= f':angel: [{kill_name}] [{str(count)}번] 차감 완료! [잔여 : {str(kill_Data[kill_name])}번]\n',
color=0xff00ff
)
if kill_Data[kill_name] == 0:
del kill_Data[kill_name]
return await ctx.send(embed=embed, tts=False)
else:
return
################ 경주 ################
@commands.command(name=command[27][0], aliases=command[27][1:])
async def race_(self, ctx):
if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[19]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
race_info = []
fr = []
racing_field = []
str_racing_field = []
cur_pos = []
race_val = []
random_pos = []
racing_result = []
output = ':camera: :camera: :camera: 신나는 레이싱! :camera: :camera: :camera:\n'
#racing_unit = [':giraffe:', ':elephant:', ':tiger2:', ':hippopotamus:', ':crocodile:',':leopard:',':ox:', ':sheep:', ':pig2:',':dromedary_camel:',':dragon:',':rabbit2:'] #동물스킨
#racing_unit = [':red_car:', ':taxi:', ':bus:', ':trolleybus:', ':race_car:', ':police_car:', ':ambulance:', ':fire_engine:', ':minibus:', ':truck:', ':articulated_lorry:', ':tractor:', ':scooter:', ':manual_wheelchair:', ':motor_scooter:', ':auto_rickshaw:', ':blue_car:', ':bike:', ':helicopter:', ':steam_locomotive:'] #탈것스킨
#random.shuffle(racing_unit)
racing_member = msg.split(" ")
racing_unit = []
emoji = discord.Emoji
emoji = ctx.message.guild.emojis
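            # Prefer a matching custom emoji from this guild for each racer skin;
            # otherwise fall back to the plain ':name:' text.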
for j in range(len(tmp_racing_unit)):
racing_unit.append(':' + tmp_racing_unit[j] + ':')
for i in range(len(emoji)):
if emoji[i].name == tmp_racing_unit[j].strip(":"):
racing_unit[j] = '<:' + tmp_racing_unit[j] + ':' + str(emoji[i].id) + '>'
random.shuffle(racing_unit)
field_size = 60
tmp_race_tab = 35 - len(racing_member)
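            # Each racer draws a distinct move budget from race_val; the track is a row of field_size
            # cells with the finish line '|' at column 0 and the skins starting near the right edge.
            # random_pos holds each racer's successive column positions per frame, padded with 1s so
            # all racers are animated for the same number of frames.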
if len(racing_member) <= 1:
                await ctx.send('레이스 인원이 2명보다 적습니다.')
return
elif len(racing_member) >= 13:
await ctx.send('레이스 인원이 12명 초과입니다.')
return
else :
race_val = random.sample(range(tmp_race_tab, tmp_race_tab+len(racing_member)), len(racing_member))
random.shuffle(race_val)
for i in range(len(racing_member)):
fr.append(racing_member[i])
fr.append(racing_unit[i])
fr.append(race_val[i])
race_info.append(fr)
fr = []
for i in range(field_size):
fr.append(" ")
racing_field.append(fr)
fr = []
for i in range(len(racing_member)):
racing_field[i][0] = "|"
racing_field[i][field_size-2] = race_info[i][1]
if len(race_info[i][0]) > 5:
racing_field[i][field_size-1] = "| " + race_info[i][0][:5] + '..'
else:
racing_field[i][field_size-1] = "| " + race_info[i][0]
str_racing_field.append("".join(racing_field[i]))
cur_pos.append(field_size-2)
for i in range(len(racing_member)):
output += str_racing_field[i] + '\n'
result_race = await ctx.send(output + ':traffic_light: 3초 후 경주가 시작됩니다!')
await asyncio.sleep(1)
await result_race.edit(content = output + ':traffic_light: 2초 후 경주가 시작됩니다!')
await asyncio.sleep(1)
await result_race.edit(content = output + ':traffic_light: 1초 후 경주가 시작됩니다!')
await asyncio.sleep(1)
await result_race.edit(content = output + ':checkered_flag: 경주 시작!')
for i in range(len(racing_member)):
test = random.sample(range(2,field_size-2), race_info[i][2])
while len(test) != tmp_race_tab + len(racing_member)-1 :
test.append(1)
test.append(1)
test.sort(reverse=True)
random_pos.append(test)
for j in range(len(random_pos[0])):
if j%2 == 0:
output = ':camera: :camera_with_flash: :camera: 신나는 레이싱! :camera_with_flash: :camera: :camera_with_flash:\n'
else :
output = ':camera_with_flash: :camera: :camera_with_flash: 신나는 레이싱! :camera: :camera_with_flash: :camera:\n'
str_racing_field = []
for i in range(len(racing_member)):
temp_pos = cur_pos[i]
racing_field[i][random_pos[i][j]], racing_field[i][temp_pos] = racing_field[i][temp_pos], racing_field[i][random_pos[i][j]]
cur_pos[i] = random_pos[i][j]
str_racing_field.append("".join(racing_field[i]))
await asyncio.sleep(1)
for i in range(len(racing_member)):
output += str_racing_field[i] + '\n'
await result_race.edit(content = output + ':checkered_flag: 경주 시작!')
for i in range(len(racing_field)):
fr.append(race_info[i][0])
fr.append((race_info[i][2]) - tmp_race_tab + 1)
racing_result.append(fr)
fr = []
result = sorted(racing_result, key=lambda x: x[1])
result_str = ''
for i in range(len(result)):
if result[i][1] == 1:
result[i][1] = ':first_place:'
elif result[i][1] == 2:
result[i][1] = ':second_place:'
elif result[i][1] == 3:
result[i][1] = ':third_place:'
elif result[i][1] == 4:
result[i][1] = ':four:'
elif result[i][1] == 5:
result[i][1] = ':five:'
elif result[i][1] == 6:
result[i][1] = ':six:'
elif result[i][1] == 7:
result[i][1] = ':seven:'
elif result[i][1] == 8:
result[i][1] = ':eight:'
elif result[i][1] == 9:
result[i][1] = ':nine:'
elif result[i][1] == 10:
result[i][1] = ':keycap_ten:'
else:
result[i][1] = ':x:'
result_str += result[i][1] + " " + result[i][0] + " "
#print(result)
await asyncio.sleep(1)
return await result_race.edit(content = output + ':tada: 경주 종료!\n' + result_str)
else:
return
################ 채널설정 ################
@commands.command(name=command[28][0], aliases=command[28][1:])
async def set_channel_(self, ctx):
global basicSetting
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = ctx.message.channel.id #메세지가 들어온 채널 ID
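        # Each sub-command rewrites the matching key in test_setting.ini on GitHub and
        # caches the new channel id in basicSetting.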
if channel == basicSetting[7] and msg in ["사다리", "정산", "척살", "경주", "아이템"]:
return await ctx.send(f'명령어 채널은 `{msg} 채널`로 `설정`할 수 없습니다.', tts=False)
if msg == '사다리' : #사다리 채널 설정
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('ladderchannel'):
inputData_textCH[i] = 'ladderchannel = ' + str(channel) + '\r'
basicSetting[8] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 사다리채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 사다리채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '정산' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('jungsanchannel'):
inputData_textCH[i] = 'jungsanchannel = ' + str(channel) + '\r'
basicSetting[11] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 정산채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 정산채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '척살' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('killchannel'):
inputData_textCH[i] = 'killchannel = ' + str(channel) + '\r'
basicSetting[18] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 척살채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 척살채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '경주' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('racingchannel'):
inputData_textCH[i] = 'racingchannel = ' + str(channel) + '\r'
basicSetting[19] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 경주채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 경주채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '아이템' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('itemchannel'):
inputData_textCH[i] = 'itemchannel = ' + str(channel) + '\r'
basicSetting[20] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 아이템채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 아이템채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
else :
return await ctx.send(f'```올바른 명령어를 입력해주세요.```', tts=False)
################ 채널삭제 ################
@commands.command(name=command[42][0], aliases=command[42][1:])
async def remove_channel_(self, ctx):
global basicSetting
if ctx.message.channel.id != basicSetting[7]:
return
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = ctx.message.channel.id #메세지가 들어온 채널 ID
if msg == '사다리' : #사다리 채널 설정
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[8]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('ladderchannel'):
inputData_textCH[i] = 'ladderchannel = \r'
basicSetting[8] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 사다리채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 사다리채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '정산' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[11]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('jungsanchannel'):
inputData_textCH[i] = 'jungsanchannel = \r'
basicSetting[11] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 정산채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 정산채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '척살' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[18]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('killchannel'):
inputData_textCH[i] = 'killchannel = \r'
basicSetting[18] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 척살채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 척살채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '경주' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[19]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('racingchannel'):
inputData_textCH[i] = 'racingchannel = \r'
basicSetting[19] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 경주채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 경주채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '아이템' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[20]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('itemchannel'):
inputData_textCH[i] = 'itemchannel = \r'
basicSetting[20] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 아이템채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 아이템채널 [{ch_name}] 삭제완료 >', tts=False)
else :
return await ctx.send(f'```올바른 명령어를 입력해주세요.```', tts=False)
################ 아이템초기화 확인 ################
@commands.command(name=command[29][0], aliases=command[29][1:])
async def itemInit_(self, ctx):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
item_Data = {}
await init_data_list('item_list.ini', '-----아이템 목록-----')
return await ctx.send( '< 아이템 목록 초기화완료 >', tts=False)
else:
return
################ 아이템 목록 확인 및 추가 ################
@commands.command(name=command[30][0], aliases=command[30][1:])
async def itemList_(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
if not args:
sorted_item_list = sorted(item_Data.items(), key=lambda x: x[0])
embed_list : list = []
embed_index : int = 0
embed_cnt : int = 0
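                # Discord caps an embed at 25 fields, so the inventory is paged into embeds
                # of at most 24 fields each.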
embed = discord.Embed(title = '', description = f'`{self.bot.user.name}\'s 창고`', color = 0x00ff00)
embed_list.append(embed)
if len(sorted_item_list) > 0 :
for item_id, count in sorted_item_list:
embed_cnt += 1
if embed_cnt > 24 :
embed_cnt = 0
embed_index += 1
tmp_embed = discord.Embed(
title = "",
description = "",
color=0x00ff00
)
embed_list.append(tmp_embed)
embed_list[embed_index].add_field(name = item_id, value = count)
embed_list[len(embed_list)-1].set_footer(text = f"전체 아이템 종류 : {len(item_Data)}개")
if len(embed_list) > 1:
for embed_data in embed_list:
await asyncio.sleep(0.1)
await ctx.send(embed = embed_data)
return
else:
return await ctx.send(embed=embed, tts=False)
else :
embed.add_field(name = '\u200b\n', value = '창고가 비었습니다.\n\u200b')
return await ctx.send(embed=embed, tts=False)
input_data = args.split()
if len(input_data) == 1:
item_name = args
count = 1
elif len(input_data) == 2:
item_name = input_data[0]
try:
count = int(input_data[1])
except ValueError:
return await ctx.send(f'아이템 [개수]는 숫자로 입력바랍니다')
else:
return await ctx.send(f'{command[30][0]} [아이템명] 혹은 {command[30][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
if item_name in item_Data:
item_Data[item_name] += int(count)
else:
item_Data[item_name] = int(count)
embed = discord.Embed(
description= f':inbox_tray: **[{item_name}] [{str(count)}개]** 등록 완료! [잔여 : {str(item_Data[item_name])}개]\n',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
else:
return
################ 아이템 삭제 ################
@commands.command(name=command[31][0], aliases=command[31][1:])
async def itemDel_(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
if not args:
return await ctx.send( f'{command[31][0]} [아이템명] 양식에 맞춰 입력해주세요!', tts = False)
if args in item_Data:
del item_Data[args]
embed = discord.Embed(
description= ':outbox_tray: ' + args + ' 삭제완료!',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
else :
return await ctx.send( '```아이템 목록에 등록되어 있지 않습니다!\n```', tts=False)
else:
return
################ 아이템 차감 ################
@commands.command(name=command[32][0], aliases=command[32][1:])
async def itemSubtract_(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
if not args:
return await ctx.send(f'{command[32][0]} [아이템명] 혹은 {command[32][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
input_data = args.split()
if len(input_data) == 1:
item_name = args
count = 1
elif len(input_data) == 2:
item_name = input_data[0]
try:
count = int(input_data[1])
except ValueError:
return await ctx.send(f'아이템 [개수]는 숫자로 입력바랍니다')
else:
return await ctx.send(f'{command[32][0]} [아이템명] 혹은 {command[32][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
if item_name in item_Data:
if item_Data[item_name] < int(count):
return await ctx.send( f"등록된 아이템 개수[{str(item_Data[item_name])}개]보다 차감 개수[{str(count)}개]가 많습니다. 등록 개수에 맞게 재입력 바랍니다.", tts=False)
else:
item_Data[item_name] -= int(count)
else:
return await ctx.send( '```아이템 목록에 등록되어 있지 않습니다!\n```', tts=False)
embed = discord.Embed(
description= f':outbox_tray: **[{item_name}] [{str(count)}개]** 차감 완료! [잔여 : {str(item_Data[item_name])}개]\n',
color=0xff00ff
)
if item_Data[item_name] == 0:
del item_Data[item_name]
return await ctx.send(embed=embed, tts=False)
else:
return
################ 서버 나가기 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[34][0], aliases=command[34][1:])
async def leaveGuild_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
guild_list : str = ""
guild_name : str = ""
            for i, guild in enumerate(self.bot.guilds):
                guild_list += f"`{i+1}.` {guild.name}\n"
embed = discord.Embed(
title = "----- 서버 목록 -----",
description = guild_list,
color=0x00ff00
)
await ctx.send(embed = embed)
try:
await ctx.send(f"```떠나고 싶은 서버의 [숫자]를 입력하여 선택해 주세요```")
message_result : discord.Message = await self.bot.wait_for("message", timeout = 10, check=(lambda message: message.channel == ctx.message.channel and message.author == ctx.message.author))
except asyncio.TimeoutError:
return await ctx.send(f"```서버 선택 시간이 초과됐습니다! 필요시 명령어를 재입력해 주세요```")
try:
guild_name = self.bot.guilds[int(message_result.content)-1].name
await self.bot.get_guild(self.bot.guilds[int(message_result.content)-1].id).leave()
return await ctx.send(f"```[{guild_name}] 서버에서 떠났습니다.!```")
except ValueError:
return
################ 수수료 계산기 ################
@commands.command(name=command[35][0], aliases=command[35][1:])
async def tax_check(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
if not args:
return await ctx.send(f"**{command[35][0]} [판매금액] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
input_money_data : list = args.split()
len_input_money_data = len(input_money_data)
try:
for i in range(len_input_money_data):
input_money_data[i] = int(input_money_data[i])
except ValueError:
return await ctx.send(f"**[판매금액] (거래소세금)**은 숫자로 입력 해주세요.")
            if len_input_money_data < 1 or len_input_money_data > 2:
return await ctx.send(f"**{command[35][0]} [판매금액] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
elif len_input_money_data == 2:
tax = input_money_data[1]
else:
tax = 5
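            # price_rev_tax is the listing price needed to receive the entered amount after tax,
            # price_first_tax the net from one exchange sale, and price_second_tax the net if that
            # amount is listed and sold once more.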
price_first_tax = int(input_money_data[0] * ((100-tax)/100))
price_second_tax = int(price_first_tax * ((100-tax)/100))
price_rev_tax = int((input_money_data[0] * 100)/(100-tax)+0.5)
embed = discord.Embed(
title = f"🧮 수수료 계산결과 (세율 {tax}% 기준) ",
description = f"",
color=0x00ff00
)
embed.add_field(name = "⚖️ 수수료 지원", value = f"```등록가 : {price_rev_tax}\n수령가 : {input_money_data[0]}\n세 금 : {price_rev_tax-input_money_data[0]}```")
embed.add_field(name = "⚖️ 1차 거래", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_first_tax}\n세 금 : {input_money_data[0]-price_first_tax}```")
embed.add_field(name = "⚖️ 2차 거래", value = f"```등록가 : {price_first_tax}\n정산가 : {price_second_tax}\n세 금 : {price_first_tax-price_second_tax}```")
return await ctx.send(embed = embed)
else:
return
################ 페이백 계산기 ################
@commands.command(name=command[36][0], aliases=command[36][1:])
async def payback_check(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
if not args:
return await ctx.send(f"**{command[36][0]} [거래소가격] [실거래가] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
input_money_data : list = args.split()
len_input_money_data = len(input_money_data)
try:
for i in range(len_input_money_data):
input_money_data[i] = int(input_money_data[i])
except ValueError:
return await ctx.send(f"**[판매금액] (거래소세금)**은 숫자로 입력 해주세요.")
            if len_input_money_data < 2 or len_input_money_data > 3:
return await ctx.send(f"**{command[36][0]} [거래소가격] [실거래가] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
elif len_input_money_data == 3:
tax = input_money_data[2]
else:
tax = 5
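            # Result 1 compares the exchange net price with the after-tax real-trade price;
            # result 2 compares it with the raw real-trade price instead.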
price_reg_tax = int(input_money_data[0] * ((100-tax)/100))
price_real_tax = int(input_money_data[1] * ((100-tax)/100))
            result_payback = price_reg_tax - price_real_tax
            result_payback1 = price_reg_tax - input_money_data[1]
embed = discord.Embed(
title = f"🧮 페이백 계산결과1 (세율 {tax}% 기준) ",
description = f"**```fix\n{reault_payback}```**",
color=0x00ff00
)
embed.add_field(name = "⚖️ 거래소", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_reg_tax}\n세 금 : {input_money_data[0]-price_reg_tax}```")
embed.add_field(name = "🕵️ 실거래", value = f"```등록가 : {input_money_data[1]}\n정산가 : {price_real_tax}\n세 금 : {input_money_data[1]-price_real_tax}```")
await ctx.send(embed = embed)
embed2 = discord.Embed(
title = f"🧮 페이백 계산결과2 (세율 {tax}% 기준) ",
description = f"**```fix\n{reault_payback1}```**",
color=0x00ff00
)
embed2.add_field(name = "⚖️ 거래소", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_reg_tax}\n세 금 : {input_money_data[0]-price_reg_tax}```")
embed2.add_field(name = "🕵️ 실거래", value = f"```내판가 : {input_money_data[1]}```")
return await ctx.send(embed = embed2)
else:
return
@commands.command(name=command[37][0], aliases=command[37][1:])
async def command_rock_paper_scissors_game(self, ctx : commands.Context):
if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[19]:
return
message_rock_paper_scissors : discord.message.Message = await ctx.send("안내면 진거 가위바위..")
reaction_emoji : list = ["✌️", "✊", "✋"]
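        # Offer ✌✊✋ as reactions and wait up to basicSetting[5] seconds for the author's pick,
        # then compare it with the bot's random choice.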
for emoji in reaction_emoji:
await message_rock_paper_scissors.add_reaction(emoji)
def reaction_check(reaction, user):
return (reaction.message.id == message_rock_paper_scissors.id) and (user.id == ctx.author.id) and (str(reaction) in reaction_emoji)
try:
reaction_result, user = await self.bot.wait_for('reaction_add', check = reaction_check, timeout = int(basicSetting[5]))
except asyncio.TimeoutError:
return await ctx.send(f"시간이 초과됐습니다. ")
bot_result : str = random.choice(reaction_emoji)
result_rock_paper_scissors : str = ""
if reaction_result is None:
result_rock_paper_scissors = f"왜 안냄?"
elif str(reaction_result) == bot_result:
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n🤔비겼다!"
elif str(reaction_result) == "✌️" and bot_result == "✋":
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n👍짝짝짝"
elif str(reaction_result) == "✊" and bot_result == "✌️":
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n👍짝짝짝"
elif str(reaction_result) == "✋" and bot_result == "✊":
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n👍짝짝짝"
else:
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n🤪저런.."
return await ctx.send(result_rock_paper_scissors)
################ 보이스사용 ################
@commands.command(name=command[38][0], aliases=command[38][1:])
async def command_voice_use(self, ctx : commands.Context):
if ctx.message.channel.id != basicSetting[7]:
return
inidata_voice_use = repo.get_contents("test_setting.ini")
file_data_voice_use = base64.b64decode(inidata_voice_use.content)
file_data_voice_use = file_data_voice_use.decode('utf-8')
inputData_voice_use = file_data_voice_use.split('\n')
for i in range(len(inputData_voice_use)):
if inputData_voice_use[i].startswith("voice_use ="):
inputData_voice_use[i] = f"voice_use = 1\r"
basicSetting[21] = "1"
result_voice_use = '\n'.join(inputData_voice_use)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voice_use, contents.sha)
if basicSetting[6] != "":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
except:
await ctx.send( '< 음성채널 접속 에러! >', tts=False)
pass
            if self.bot.voice_clients and self.bot.voice_clients[0].is_connected() :
print("보이스 사용 설정 완료!")
return await ctx.send(f"```보이스를 사용하도록 설정하였습니다.!```")
return await ctx.send(f"```보이스 사용 설정이 완료 되었습니다!\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >```")
################ 보이스미사용 ################
@commands.command(name=command[39][0], aliases=command[39][1:])
async def command_voice_not_use(self, ctx : commands.Context):
if ctx.message.channel.id != basicSetting[7]:
return
for vc in self.bot.voice_clients:
if vc.guild.id == int(ctx.guild.id):
if vc.is_playing():
vc.stop()
await vc.disconnect(force=True)
inidata_voice_use = repo.get_contents("test_setting.ini")
file_data_voice_use = base64.b64decode(inidata_voice_use.content)
file_data_voice_use = file_data_voice_use.decode('utf-8')
inputData_voice_use = file_data_voice_use.split('\n')
for i in range(len(inputData_voice_use)):
if inputData_voice_use[i].startswith("voice_use ="):
inputData_voice_use[i] = f"voice_use = 0\r"
basicSetting[21] = "0"
result_voice_use = '\n'.join(inputData_voice_use)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voice_use, contents.sha)
return await ctx.send(f"```보이스를 사용하지 않도록 설정하였습니다.!```")
################ 럭키박스 ################
@commands.command(name=command[41][0], aliases=command[41][1:])
async def command_randombox_game(self, ctx : commands.Context, *, args : str = None):
if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[19]:
return
if not args:
            return await ctx.send(f'```명령어 [추첨인원] (대기시간/초) *(메모) 형태로 입력해주시기 바랍니다.```')
memo_data : str = ""
waiting_time : int = 30
if args.find("*") == -1:
input_game_data = args.split()
else:
input_game_data = args[:args.find("*")-1].split()
memo_data = args[args.find("*")+1:]
try:
num_cong = int(input_game_data[0]) # 뽑을 인원
if num_cong <= 0:
return await ctx.send(f'```추첨인원이 0보다 작거나 같습니다. 재입력 해주세요```')
except ValueError:
return await ctx.send('```추첨인원은 숫자로 입력 바랍니다\nex)!럭키박스 1```')
if len(input_game_data) >= 2:
waiting_time : int = 30
try:
waiting_time = int(input_game_data[1]) # 대기시간
if waiting_time <= 0 :
return await ctx.send(f'```대기시간이 0보다 작거나 같습니다. 재입력 해주세요```')
except ValueError:
return await ctx.send(f'```대기시간(초)는 숫자로 입력 바랍니다\nex)!럭키박스 1 60```')
reaction_emoji : list = ["✅", "❌"]
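        # Participants join by reacting with ✅ during the countdown; the author can cancel with ❌.
        # After the countdown the bot is removed from the participant list and the winners are
        # drawn with random.sample.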
embed = discord.Embed(title = f"📦 럭키박스! 묻고 더블로 가! (잔여시간 : {waiting_time}초)", description = f"참가를 원하시면 ✅를 클릭해주세요!", timestamp =datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=int(basicSetting[0])))),
color=0x00ff00
)
if memo_data != "":
embed.add_field(name = "📜 메모", value = f"```{memo_data}```", inline=False)
game_message : discord.message.Message = await ctx.send(embed = embed)
for emoji in reaction_emoji:
await game_message.add_reaction(emoji)
cache_msg = await ctx.fetch_message(game_message.id)
for i in range(waiting_time):
embed.title = f"📦 럭키박스! 묻고 더블로 가! (잔여시간 : {waiting_time - i}초)"
await game_message.edit(embed=embed)
cache_msg = await ctx.fetch_message(game_message.id)
if cache_msg.reactions[1].count >= 2:
tmp_users = await cache_msg.reactions[1].users().flatten()
for user in tmp_users:
if user.id == ctx.author.id:
embed.title = f"😫 럭키박스! 취소! 😱"
embed.description = ""
await game_message.edit(embed=embed)
return await ctx.send(f"```게임이 취소되었습니다.!```")
await asyncio.sleep(1)
if cache_msg.reactions[0].count == 1:
embed.title = f"😫 럭키박스! 추첨 실패! 😱"
embed.description = ""
await game_message.edit(embed=embed)
return await ctx.send(f"```참여자가 없어 게임이 취소되었습니다.!```")
if num_cong >= cache_msg.reactions[0].count-1:
embed.title = f"😫 럭키박스! 추첨 실패! 😱"
embed.description = ""
await game_message.edit(embed=embed)
return await ctx.send(f'```추첨인원이 참여인원과 같거나 많습니다. 재입력 해주세요```')
participant_users = await cache_msg.reactions[0].users().flatten()
del_index : int = 0
for i, user in enumerate(participant_users):
if self.bot.user.id == user.id:
del_index = i
del participant_users[del_index]
user_name_list : list = []
for user in participant_users:
user_name_list.append(user.mention)
for _ in range(num_cong + 5):
random.shuffle(user_name_list)
result_users = None
for _ in range(num_cong + 5):
result_users = random.sample(user_name_list, num_cong)
lose_user = list(set(user_name_list)-set(result_users))
embed.title = f"🎉 럭키박스! 결과발표! 🎉"
embed.description = ""
embed.add_field(name = f"👥 참가자 ({len(user_name_list)}명)", value = f"{', '.join(user_name_list)}", inline=False)
embed.add_field(name = f"😍 당첨 ({num_cong}명)", value = f"{', '.join(result_users)}")
if len(lose_user) != 0:
embed.add_field(name = f"😭 낙첨 ({len(lose_user)}명)", value = f"{', '.join(lose_user)}")
return await game_message.edit(embed=embed)
################ ?????????????? ################
@commands.command(name='!오빠')
async def brother1_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
return await PlaySound(ctx.voice_client, './sound/오빠.mp3')
@commands.command(name='!언니')
async def sister_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
return await PlaySound(ctx.voice_client, './sound/언니.mp3')
@commands.command(name='!형')
async def brother2_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
return await PlaySound(ctx.voice_client, './sound/형.mp3')
@commands.command(name='!TJ', aliases=['!tj'])
async def TJ_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
resultTJ = random.randrange(1,9)
return await PlaySound(ctx.voice_client, './sound/TJ' + str(resultTJ) +'.mp3')
class IlsangDistributionBot(commands.AutoShardedBot):
def __init__(self):
super().__init__(command_prefix=[""], help_command=None)
def run(self):
super().run(access_token, reconnect=True)
async def on_ready(self):
global basicSetting
global channel
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chkvoicechannel
global chflg
global endTime
global setting_channel_name
print("Logged in as ") #화면에 봇의 아이디, 닉네임이 출력됩니다.
print(self.user.name)
print(self.user.id)
print("===========")
channel_name, channel_id, channel_voice_name, channel_voice_id = await get_guild_channel_info(self)
await dbLoad()
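        # Validate every stored channel id against the guild's current channels; ids that no longer
        # exist are cleared so the matching 채널설정 command has to be run again.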
if str(basicSetting[7]) in channel_id:
channel = basicSetting[7]
setting_channel_name = self.get_channel(basicSetting[7]).name
now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
print('< 접속시간 [' + now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S') + '] >')
print('< 텍스트채널 [' + self.get_channel(basicSetting[7]).name + '] 접속완료>')
if basicSetting[21] == "1" and str(basicSetting[6]) in channel_voice_id:
try:
await self.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
print('< 음성채널 [' + self.get_channel(basicSetting[6]).name + '] 접속완료 >')
except:
print('< 음성채널 [' + self.get_channel(basicSetting[6]).name + '] 접속에러 >')
pass
elif basicSetting[21] == "1" and str(basicSetting[6]) not in channel_voice_id:
print(f"설정된 음성채널 값이 없거나 잘못 됐습니다. 음성채널 접속 후 **[{command[5][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
await self.get_channel(int(basicSetting[7])).send(f"설정된 음성채널 값이 없거나 잘못 됐습니다. 음성채널 접속 후 **[{command[5][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
if basicSetting[8] != "":
if str(basicSetting[8]) in channel_id:
print('< 사다리채널 [' + self.get_channel(int(basicSetting[8])).name + '] 접속완료 >')
else:
basicSetting[8] = ""
print(f"사다리채널 ID 오류! [{command[28][0]} 사다리] 명령으로 재설정 바랍니다.")
if basicSetting[11] != "":
if str(basicSetting[11]) in channel_id:
print('< 정산채널 [' + self.get_channel(int(basicSetting[11])).name + '] 접속완료>')
else:
basicSetting[11] = ""
print(f"정산채널 ID 오류! [{command[28][0]} 정산] 명령으로 재설정 바랍니다.")
if basicSetting[18] != "":
if str(basicSetting[18]) in channel_id:
print('< 척살채널 [' + self.get_channel(int(basicSetting[18])).name + '] 접속완료>')
else:
basicSetting[18] = ""
print(f"척살채널 ID 오류! [{command[28][0]} 척살] 명령으로 재설정 바랍니다.")
if basicSetting[19] != "":
if str(basicSetting[19]) in channel_id:
print('< 경주채널 [' + self.get_channel(int(basicSetting[19])).name + '] 접속완료>')
else:
basicSetting[19] = ""
print(f"경주채널 ID 오류! [{command[28][0]} 경주] 명령으로 재설정 바랍니다.")
if basicSetting[20] != "":
if str(basicSetting[20]) in channel_id:
print('< 아이템채널 [' + self.get_channel(int(basicSetting[20])).name + '] 접속완료>')
else:
basicSetting[20] = ""
print(f"아이템채널 ID 오류! [{command[28][0]} 아이템] 명령으로 재설정 바랍니다.")
if int(basicSetting[13]) != 0 :
print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
else :
print('< 보탐봇 재시작 설정안됨 >')
chflg = 1
else:
basicSetting[6] = ""
basicSetting[7] = ""
print(f"설정된 채널 값이 없거나 잘못 됐습니다. **[{command[0][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
# 디스코드에는 현재 본인이 어떤 게임을 플레이하는지 보여주는 기능이 있습니다.
# 이 기능을 사용하여 봇의 상태를 간단하게 출력해줄 수 있습니다.
await self.change_presence(status=discord.Status.online, activity=discord.Game(name=command[1][0], type=1), afk=False)
async def on_message(self, msg):
await self.wait_until_ready()
        if msg.author.bot:  # if the message was sent by a bot,
            return None     # ignore it and do nothing.
ori_msg = msg
global channel
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chflg
global LoadChk
global indexFixedBossname
global FixedBossDateData
        global gc  # settlement
        global credentials  # settlement
global regenembed
global command
global kill_Data
        id = msg.author.id  # id holds the ID of the message author (note: this shadows the built-in id()).
if chflg == 1 :
if self.get_channel(basicSetting[7]).id == msg.channel.id:
channel = basicSetting[7]
message = msg
for command_str in ["컷", "멍", "예상", "삭제", "메모", "카톡켬", "카톡끔"]:
if command_str in message.content:
tmp_msg : str = ""
for key, value in boss_nick.items():
if message.content[:message.content.find(command_str)].strip() in value:
message.content = message.content.replace(message.content[:message.content.find(command_str)], key)
hello = message.content
for i in range(bossNum):
                    ################ Boss kill-time ("컷") handling ################
if message.content.startswith(bossData[i][0] +'컷') or message.content.startswith(convertToInitialLetters(bossData[i][0] +'컷')) or message.content.startswith(bossData[i][0] +' 컷') or message.content.startswith(convertToInitialLetters(bossData[i][0] +' 컷')):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
curr_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_msg = bossData[i][0] +'컷'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if curr_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
                    ################ Boss missed-spawn ("멍") handling ################
if message.content.startswith(bossData[i][0] +'멍') or message.content.startswith(bossData[i][0] +' 멍'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'멍'
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if len(hello) > len(tmp_msg) + 3 :
temptime = tmp_now
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossMungCnt[i] = 0
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
if temptime > tmp_now :
temptime = temptime + datetime.timedelta(days=int(-1))
if temptime < tmp_now :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while temptime < tmp_now :
temptime = temptime + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = temptime
tmp_bossTimeString[i] = bossTimeString[i] = temptime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = temptime.strftime('%Y-%m-%d')
if tmp_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
if tmp_bossTime[i] < tmp_now :
nextTime = tmp_bossTime[i] + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if tmp_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
await self.get_channel(channel).send('```' + bossData[i][0] + '탐이 아직 안됐습니다. 다음 ' + bossData[i][0] + '탐 [' + tmp_bossTimeString[i] + '] 입니다```', tts=False)
                    ################ Expected boss time ("예상") entry ################
if message.content.startswith(bossData[i][0] +'예상') or message.content.startswith(bossData[i][0] +' 예상'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'예상'
if len(hello) > len(tmp_msg) + 4 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now < now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(1))
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_now
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
await self.get_channel(channel).send('```' + bossData[i][0] +' 예상 시간을 입력해주세요.```', tts=False)
                    ################ Boss time deletion ("삭제") ################
if message.content == bossData[i][0] +'삭제' or message.content == bossData[i][0] +' 삭제':
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
await self.get_channel(channel).send('<' + bossData[i][0] + ' 삭제완료>', tts=False)
await dbSave()
print ('<' + bossData[i][0] + ' 삭제완료>')
                    ################ Per-boss memo ("메모") ################
if message.content.startswith(bossData[i][0] +'메모 '):
tmp_msg = bossData[i][0] +'메모 '
bossData[i][6] = hello[len(tmp_msg):]
await self.get_channel(channel).send('< ' + bossData[i][0] + ' [ ' + bossData[i][6] + ' ] 메모등록 완료>', tts=False)
if message.content.startswith(bossData[i][0] +'메모삭제'):
bossData[i][6] = ''
await self.get_channel(channel).send('< ' + bossData[i][0] + ' 메모삭제 완료>', tts=False)
await self.process_commands(ori_msg)
async def on_command_error(self, ctx : commands.Context, error : commands.CommandError):
if isinstance(error, CommandNotFound):
return
elif isinstance(error, MissingRequiredArgument):
return
elif isinstance(error, discord.ext.commands.MissingPermissions):
return await ctx.send(f"**[{ctx.message.content.split()[0]}]** 명령을 사용할 권한이 없습니다.!")
elif isinstance(error, discord.ext.commands.CheckFailure):
return await ctx.send(f"**[{ctx.message.content.split()[0]}]** 명령을 사용할 권한이 없습니다.!")
raise error
async def close(self):
await super().close()
print("일상디코봇 종료 완료.")
ilsang_distribution_bot : IlsangDistributionBot = IlsangDistributionBot()
ilsang_distribution_bot.add_cog(mainCog(ilsang_distribution_bot))
ilsang_distribution_bot.add_cog(taskCog(ilsang_distribution_bot))
ilsang_distribution_bot.run()
|
the-stack_0_22518 | # -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import numpy as np
from models.sequential.SASRec import SASRec
class TiSASRec(SASRec):
@staticmethod
def parse_model_args(parser):
parser.add_argument('--time_max', type=int, default=512,
help='Max time intervals.')
return SASRec.parse_model_args(parser)
def __init__(self, args, corpus):
self.max_time = args.time_max
super().__init__(args, corpus)
setattr(corpus, 'user_min_interval', dict())
for u, user_df in corpus.all_df.groupby('user_id'):
time_seqs = user_df['time'].values
interval_matrix = np.abs(time_seqs[:, None] - time_seqs[None, :])
min_interval = np.min(interval_matrix + (interval_matrix <= 0) * 0xFFFF)
corpus.user_min_interval[u] = min_interval
def _define_params(self):
self.i_embeddings = nn.Embedding(self.item_num, self.emb_size)
self.p_k_embeddings = nn.Embedding(self.max_his + 1, self.emb_size)
self.p_v_embeddings = nn.Embedding(self.max_his + 1, self.emb_size)
self.t_k_embeddings = nn.Embedding(self.max_time + 1, self.emb_size)
self.t_v_embeddings = nn.Embedding(self.max_time + 1, self.emb_size)
self.transformer_block = nn.ModuleList([
TimeIntervalTransformerLayer(d_model=self.emb_size, d_ff=self.emb_size, n_heads=self.num_heads,
dropout=self.dropout, kq_same=False)
for _ in range(self.num_layers)
])
def forward(self, feed_dict):
self.check_list = []
i_ids = feed_dict['item_id'] # [batch_size, -1]
i_history = feed_dict['history_items'] # [batch_size, history_max]
t_history = feed_dict['history_times'] # [batch_size, history_max]
user_min_t = feed_dict['user_min_intervals'] # [batch_size]
lengths = feed_dict['lengths'] # [batch_size]
batch_size, seq_len = i_history.shape
valid_his = (i_history > 0).long()
his_vectors = self.i_embeddings(i_history)
# Position embedding
position = (lengths[:, None] - self.len_range[None, :seq_len]) * valid_his
pos_k = self.p_k_embeddings(position)
pos_v = self.p_v_embeddings(position)
# Interval embedding
interval_matrix = (t_history[:, :, None] - t_history[:, None, :]).abs()
interval_matrix = (interval_matrix / user_min_t.view(-1, 1, 1)).long().clamp(0, self.max_time)
inter_k = self.t_k_embeddings(interval_matrix)
inter_v = self.t_v_embeddings(interval_matrix)
# Self-attention
        causality_mask = np.tril(np.ones((1, 1, seq_len, seq_len), dtype=np.int64))
attn_mask = torch.from_numpy(causality_mask).to(self.device)
# attn_mask = valid_his.view(batch_size, 1, 1, seq_len)
for block in self.transformer_block:
his_vectors = block(his_vectors, pos_k, pos_v, inter_k, inter_v, attn_mask)
his_vectors = his_vectors * valid_his[:, :, None].float()
his_vector = (his_vectors * (position == 1).float()[:, :, None]).sum(1)
# his_vector = his_vectors.sum(1) / lengths[:, None].float()
# ↑ average pooling is shown to be more effective than the most recent embedding
i_vectors = self.i_embeddings(i_ids)
prediction = (his_vector[:, None, :] * i_vectors).sum(-1)
return {'prediction': prediction.view(batch_size, -1)}
class Dataset(SASRec.Dataset):
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
user_id = self.data['user_id'][index]
min_interval = self.corpus.user_min_interval[user_id]
feed_dict['history_times'] = np.array(self.data['time_his'][index])
feed_dict['user_min_intervals'] = min_interval
return feed_dict
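
# A minimal sketch (added for illustration; not used by the model code) of the per-user
# minimum-interval computation performed in TiSASRec.__init__: build the pairwise
# |t_i - t_j| matrix and take the smallest strictly positive entry, masking zeros with 0xFFFF.
def _min_interval_sketch(time_seqs: np.ndarray) -> int:
    interval_matrix = np.abs(time_seqs[:, None] - time_seqs[None, :])
    return int(np.min(interval_matrix + (interval_matrix <= 0) * 0xFFFF))
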
class TimeIntervalMultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
        """It also needs position and interaction (time interval) key/value inputs."""
        super().__init__()
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask):
bs, seq_len = k.size(0), k.size(1)
# perform linear operation and split into h heads
k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k)
if not self.kq_same:
q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k)
else:
q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k)
v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k)
# transpose to get dimensions bs * h * -1 * d_k
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
# interaction (time interval) embeddings
inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_k = inter_k.transpose(2, 3).transpose(1, 2)
inter_v = inter_v.transpose(2, 3).transpose(1, 2) # bs, head, seq_len, seq_len, d_k
# calculate attention using function we will define next
output = self.scaled_dot_product_attention(q, k, v, inter_k, inter_v, self.d_k, mask)
# concatenate heads and put through final linear layer
output = output.transpose(1, 2).reshape(bs, -1, self.d_model)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
"""
Involve pair interaction embeddings when calculating attention scores and output
"""
scores = torch.matmul(q, k.transpose(-2, -1)) # bs, head, q_len, k_len
scores += (q[:, :, :, None, :] * inter_k).sum(-1)
scores = scores / d_k ** 0.5
scores.masked_fill_(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
output = torch.matmul(scores, v) # bs, head, q_len, d_k
output += (scores[:, :, :, :, None] * inter_v).sum(-2)
return output
class TimeIntervalTransformerLayer(nn.Module):
def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
super().__init__()
self.masked_attn_head = TimeIntervalMultiHeadAttention(d_model, n_heads, kq_same=kq_same)
# Two layer norm layer and two dropout layer
self.layer_norm1 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, seq, pos_k, pos_v, inter_k, inter_v, mask):
context = self.masked_attn_head(seq, seq, seq, pos_k, pos_v, inter_k, inter_v, mask)
context = self.layer_norm1(self.dropout1(context) + seq)
output = self.linear1(context).relu()
output = self.linear2(output)
output = self.layer_norm2(self.dropout2(output) + context)
return output
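
if __name__ == '__main__':
    # Usage sketch (added for illustration; the sizes below are arbitrary assumptions, not part
    # of the module): run one TimeIntervalTransformerLayer on random tensors to check shapes.
    bs, seq_len, d, heads = 2, 5, 8, 2
    layer = TimeIntervalTransformerLayer(d_model=d, d_ff=d, n_heads=heads, dropout=0.1)
    seq = torch.rand(bs, seq_len, d)
    pos_k, pos_v = torch.rand(bs, seq_len, d), torch.rand(bs, seq_len, d)
    inter_k, inter_v = torch.rand(bs, seq_len, seq_len, d), torch.rand(bs, seq_len, seq_len, d)
    mask = torch.from_numpy(np.tril(np.ones((1, 1, seq_len, seq_len), dtype=np.int64)))
    out = layer(seq, pos_k, pos_v, inter_k, inter_v, mask)
    assert out.shape == (bs, seq_len, d)
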
|
the-stack_0_22519 | def leiaInt(msg):
while True:
try:
n = int(input(msg))
except (ValueError, TypeError):
print(f'\033[1;31mERRO: por favor, digite um número inteiro válido.\033[m')
continue
        except KeyboardInterrupt:
            print('Entrada de dados interrompida pelo usuário.')
            return 0
else:
break
return n
def linha(tam=42):
return '-' *tam
def cabeçalho(txt):
print(linha())
    print(f'{txt:^42}')  # or: print(txt.center(42))
print(linha())
def menu(lista):
cabeçalho('MENU PRINCIPAL')
c = 1
for item in lista:
print(f'\033[33m{c}\033[m - \033[34m{item}\033[m')
c+=1
print(linha())
opc = leiaInt('Sua Opção:')
return opc |
the-stack_0_22520 | import discord
from discord.ext import commands
bot = commands.Bot(command_prefix = '%')
@bot.event
async def on_ready():
print('Ready')
@bot.command(aliases = ['say'])
async def test(ctx, *args):
    # ctx carries information about the invocation context and the user who sent the command
text = ""
for i in args:
text+=i+" "
await ctx.send(f'"{text}"')
@bot.command()
@commands.has_role("admin") # rol kısıtlaması
async def spoilerText(ctx):
await ctx.send('||spoilerText||')
@bot.command()
async def user(ctx, *, member):  # everything after the bare "*" is captured into member as a single string
membername, memberid = member.split('#')
await ctx.send(membername+" "+memberid)
@bot.command()
async def bsay(ctx, *, text):
await ctx.send(f"'{text}'")
@bot.command()
async def cpyc(ctx):
await ctx.channel.clone()
# await suspends the coroutine here until the awaited operation completes
# Load the bot token from an environment variable instead of hard-coding it in source
# (the variable name below is illustrative).
import os
bot.run(os.environ["DISCORD_BOT_TOKEN"])
|
the-stack_0_22524 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing.
This reimplements tests from the bitcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
from test_framework.mininode import network_thread_start
import struct
class PreviousSpendableOutput():
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def __init__(self, header=None):
super(CBrokenBlock, self).__init__(header)
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
r = b""
r += super(CBrokenBlock, self).serialize()
return r
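
def _compactsize_demo(n=3):
    """Illustration only (not part of the original test): contrast the canonical 1-byte
    CompactSize for a small count with the bloated 9-byte form that CBrokenBlock.serialize()
    emits (0xff prefix followed by an 8-byte little-endian value)."""
    canonical = struct.pack("<B", n)        # 1 byte for n < 253
    bloated = struct.pack("<BQ", 255, n)    # 0xff marker + 8-byte value
    assert len(canonical) == 1 and len(bloated) == 9
    return canonical, bloated
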
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do the comparison.
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
def add_options(self, parser):
super().add_options(parser)
parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
tx = create_transaction(spend_tx, n, b"", value, script)
return tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx, n):
scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, n, value, script)
self.sign_tx(tx, spend_tx, n)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
        if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
        if spend is None:
block = create_block(base_block_hash, coinbase, block_time)
block.nVersion = 0x20000000
else:
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
block.nVersion = 0x20000000
tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
self.sign_tx(tx, spend.tx, spend.n)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand for functions
block = self.next_block
create_tx = self.create_tx
create_and_sign_tx = self.create_and_sign_transaction
# these must be updated if consensus changes
MAX_BLOCK_SIGOPS = 20000
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
block(1, spend=out[0])
save_spendable_output()
yield accepted()
block(2, spend=out[1])
yield accepted()
save_spendable_output()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
block(4, spend=out[2])
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out[2])
save_spendable_output()
yield rejected()
block(6, spend=out[3])
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out[2])
yield rejected()
block(8, spend=out[4])
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out[3])
yield rejected()
block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out[3])
save_spendable_output()
b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
# Test that a block with too many checksigs is rejected
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out[6])
yield rejected()
block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
block(27, spend=out[7])
yield rejected(False)
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b28 chain to make sure bitcoind isn't accepting b28
block(29, spend=out[7])
yield rejected(False)
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
b31 = block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
yield accepted()
save_spendable_output()
# this goes over the limit because the coinbase has one sigop
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKMULTISIGVERIFY
tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
block(33, spend=out[9], script=lots_of_multisigs)
yield accepted()
save_spendable_output()
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
block(34, spend=out[10], script=too_many_multisigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKSIGVERIFY
tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = block(35, spend=out[10], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
block(36, spend=out[11], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
tip(35)
b37 = block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
b37 = update_block(37, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
tip(35)
block(38, spend=txout_b37)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
tip(35)
b39 = block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = update_block(39, [])
yield accepted()
save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
tip(39)
b40 = block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes+1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
update_block(40, new_txs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# same as b40, but one less sigop
tip(39)
block(41, spend=None)
update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
update_block(41, [tx])
yield accepted()
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
tip(39)
block(42, spend=out[12])
yield rejected()
save_spendable_output()
block(43, spend=out[13])
yield accepted()
save_spendable_output()
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nVersion = 0x20000000
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
yield accepted()
# A block with a non-coinbase as the first tx
non_coinbase = create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nVersion = 0x20000000
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
self.tip = b45
self.blocks[45] = b45
yield rejected(RejectResult(16, b'bad-cb-missing'))
# A block with no txns
tip(44)
b46 = CBlock()
b46.nVersion = 0x20000000
b46.nTime = b44.nTime+1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
s = ser_uint256(b46.hashMerkleRoot)
yield rejected(RejectResult(16, b'bad-blk-length'))
# Pricecoin: Temporarily disable test
# A block with invalid work
#tip(44)
#b47 = block(47, solve=False)
#target = uint256_from_compact(b47.nBits)
#while b47.scrypt256 < target: #changed > to <
# b47.nNonce += 1
# b47.rehash()
#yield rejected(RejectResult(16, b'high-hash'))
# A block with timestamp > 2 hrs in the future
tip(44)
b48 = block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
yield rejected(RejectResult(16, b'time-too-new'))
# A block with an invalid merkle hash
tip(44)
b49 = block(49)
b49.hashMerkleRoot += 1
b49.solve()
yield rejected(RejectResult(16, b'bad-txnmrklroot'))
# A block with an incorrect POW limit
tip(44)
b50 = block(50)
b50.nBits = b50.nBits - 1
b50.solve()
yield rejected(RejectResult(16, b'bad-diffbits'))
# A block with two coinbase txns
tip(44)
b51 = block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = update_block(51, [cb2])
yield rejected(RejectResult(16, b'bad-cb-multiple'))
# A block w/ duplicate txns
# Note: txns have to be in the right position in the merkle tree to trigger this error
tip(44)
b52 = block(52, spend=out[15])
tx = create_tx(b52.vtx[1], 0, 1)
b52 = update_block(52, [tx, tx])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
tip(43)
block(53, spend=out[14])
yield rejected() # rejected since b44 is at same height
save_spendable_output()
# invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
b54 = block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
yield rejected(RejectResult(16, b'time-too-old'))
# valid timestamp
tip(53)
b55 = block(55, spend=out[15])
b55.nTime = b35.nTime
update_block(55, [])
yield accepted()
save_spendable_output()
# Test CVE-2012-2459
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
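        # A minimal sketch (added for illustration; it is not part of the original test flow) of
        # the merkle malleability behind CVE-2012-2459: when a level has an odd number of hashes
        # the last one is paired with itself, so [a, b, c] and [a, b, c, c] give the same root.
        import hashlib as _hashlib
        def _dsha(data):
            return _hashlib.sha256(_hashlib.sha256(data).digest()).digest()
        def _toy_merkle(level):
            while len(level) > 1:
                if len(level) % 2:
                    level = level + [level[-1]]
                level = [_dsha(level[j] + level[j + 1]) for j in range(0, len(level), 2)]
            return level[0]
        _leaves = [bytes([j]) * 32 for j in (1, 2, 3)]
        assert _toy_merkle(list(_leaves)) == _toy_merkle(_leaves + [_leaves[-1]])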
# b57 - a good block with 2 txs, don't submit until end
tip(55)
b57 = block(57)
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
b57 = update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx),3)
b56 = update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# b57p2 - a good block with 6 tx'es, don't submit until end
tip(55)
b57p2 = block("57p2")
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
tx2 = create_tx(tx1, 0, 1)
tx3 = create_tx(tx2, 0, 1)
tx4 = create_tx(tx3, 0, 1)
b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx),6)
b56p2 = update_block("b56p2", [tx3, tx4])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip("57p2")
yield accepted()
tip(57)
yield rejected() #rejected because 57p2 seen first
save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
tip(57)
b58 = block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = update_block(58, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# tx with output value > input value out of range
tip(57)
b59 = block(59)
tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
b59 = update_block(59, [tx])
yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
# reset to good chain
tip(57)
b60 = block(60, spend=out[17])
yield accepted()
save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
tip(60)
b61 = block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
b61.vtx[0].rehash()
b61 = update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
yield rejected(RejectResult(16, b'bad-txns-BIP30'))
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
tip(60)
b62 = block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff #this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = update_block(62, [tx])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
tip(60)
b63 = block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = update_block(63, [])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64 but w/ canonical varint)
#
tip(60)
regular_block = block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
self.test.block_store.erase(b64a.sha256)
tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
tip(64)
block(65)
tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 0)
update_block(65, [tx1, tx2])
yield accepted()
save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
tip(65)
block(66)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
update_block(66, [tx2, tx1])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
tip(65)
block(67)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
tx3 = create_and_sign_tx(tx1, 0, 2)
update_block(67, [tx1, tx2, tx3])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
tip(65)
block(68, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
update_block(68, [tx])
yield rejected(RejectResult(16, b'bad-cb-amount'))
tip(65)
b69 = block(69, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
update_block(69, [tx])
yield accepted()
save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
tip(69)
block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
update_block(70, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
        # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
#
tip(69)
b72 = block(72)
tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
tx2 = create_and_sign_tx(tx1, 0, 1)
b72 = update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
tip(71)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip(72)
yield accepted()
save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
#
tip(72)
b73 = block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS+1] = element_size // 256
a[MAX_BLOCK_SIGOPS+2] = 0
a[MAX_BLOCK_SIGOPS+3] = 0
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b73 = update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
        # b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
#
#
tip(72)
b74 = block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS+1] = 0xfe
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
a[MAX_BLOCK_SIGOPS+4] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b74 = update_block(74, [tx])
yield rejected(RejectResult(16, b'bad-blk-sigops'))
tip(72)
b75 = block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS+1] = 0xff
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b75 = update_block(75, [tx])
yield accepted()
save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
tip(75)
b76 = block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
b76 = update_block(76, [tx])
yield accepted()
save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
#
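        # A minimal sketch (added for illustration; the constant and helper below are not used by
        # the test) of the low-S rule mentioned above: an ECDSA signature (r, s) over secp256k1
        # is "high-S" when s > N/2, and the standard form replaces it with s' = N - s.
        _SECP256K1_N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
        def _to_low_s(s_value):
            return s_value if s_value <= _SECP256K1_N // 2 else _SECP256K1_N - s_value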
tip(76)
block(77)
tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
update_block(77, [tx77])
yield accepted()
save_spendable_output()
block(78)
tx78 = create_tx(tx77, 0, 9*COIN)
update_block(78, [tx78])
yield accepted()
block(79)
tx79 = create_tx(tx78, 0, 8*COIN)
update_block(79, [tx79])
yield accepted()
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
tip(77)
block(80, spend=out[25])
yield rejected()
save_spendable_output()
block(81, spend=out[26])
yield rejected() # other chain is same length
save_spendable_output()
block(82, spend=out[27])
yield accepted() # now this chain is longer, triggers re-org
save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
update_block(83, [tx1, tx2])
yield accepted()
save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
#
block(84)
tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
update_block(84, [tx1,tx2,tx3,tx4,tx5])
yield accepted()
save_spendable_output()
tip(83)
block(85, spend=out[29])
yield rejected()
block(86, spend=out[30])
yield accepted()
tip(84)
block(87, spend=out[30])
yield rejected()
save_spendable_output()
block(88, spend=out[31])
yield accepted()
save_spendable_output()
# trying to spend the OP_RETURN output is rejected
block("89a", spend=out[32])
tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
update_block("89a", [tx])
yield rejected()
# Test re-org of a week's worth of blocks (1088 blocks)
# This test takes a minute or two and can be accomplished in memory
#
if self.options.runbarelyexpensive:
tip(88)
LARGE_REORG_SIZE = 1088
test1 = TestInstance(sync_every_block=False)
spend=out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
yield test1
chain1_tip = i
# now create alt chain of same length
tip(88)
test2 = TestInstance(sync_every_block=False)
for i in range(89, LARGE_REORG_SIZE + 89):
block("alt"+str(i))
test2.blocks_and_transactions.append([self.tip, False])
yield test2
# extend alt chain to trigger re-org
block("alt" + str(chain1_tip + 1))
yield accepted()
# ... and re-org back to the first chain
tip(chain1_tip)
block(chain1_tip + 1)
yield rejected()
block(chain1_tip + 2)
yield accepted()
chain1_tip += 2
if __name__ == '__main__':
FullBlockTest().main()
|
the-stack_0_22525 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import sys
from threading import Lock, Thread, Event
import time
import weakref
import ssl
from six.moves import range
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # noqa
import asyncore
from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager
log = logging.getLogger(__name__)
_dispatcher_map = {}
def _cleanup(loop):
if loop:
loop._cleanup()
class WaitableTimer(Timer):
def __init__(self, timeout, callback):
Timer.__init__(self, timeout, callback)
self.callback = callback
self.event = Event()
self.final_exception = None
def finish(self, time_now):
try:
finished = Timer.finish(self, time_now)
if finished:
self.event.set()
return True
return False
except Exception as e:
self.final_exception = e
self.event.set()
return True
def wait(self, timeout=None):
self.event.wait(timeout)
if self.final_exception:
raise self.final_exception
class _PipeWrapper(object):
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
def close(self):
os.close(self.fd)
def getsockopt(self, level, optname, buflen=None):
# act like an unerrored socket for the asyncore error handling
if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen:
return 0
raise NotImplementedError()
class _AsyncoreDispatcher(asyncore.dispatcher):
def __init__(self, socket):
asyncore.dispatcher.__init__(self, map=_dispatcher_map)
# inject after to avoid base class validation
self.set_socket(socket)
self._notified = False
def writable(self):
return False
def validate(self):
assert not self._notified
self.notify_loop()
assert self._notified
self.loop(0.1)
assert not self._notified
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1)
class _AsyncorePipeDispatcher(_AsyncoreDispatcher):
def __init__(self):
self.read_fd, self.write_fd = os.pipe()
_AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd))
def writable(self):
return False
def handle_read(self):
while len(os.read(self.read_fd, 4096)) == 4096:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
os.write(self.write_fd, b'x')
class _AsyncoreUDPDispatcher(_AsyncoreDispatcher):
"""
Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because
it relies on local port binding.
Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per
instance, or this could be specialized to scan until an address is found.
To use::
from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop
AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher
"""
bind_address = ('localhost', 10000)
def __init__(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.bind(self.bind_address)
self._socket.setblocking(0)
_AsyncoreDispatcher.__init__(self, self._socket)
def handle_read(self):
try:
d = self._socket.recvfrom(1)
while d and d[1]:
d = self._socket.recvfrom(1)
except socket.error as e:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
self._socket.sendto(b'', self.bind_address)
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1)
class _BusyWaitDispatcher(object):
max_write_latency = 0.001
"""
Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check
if anything is writable.
"""
def notify_loop(self):
pass
def loop(self, timeout):
if not _dispatcher_map:
time.sleep(0.005)
count = timeout // self.max_write_latency
asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count)
def validate(self):
pass
def close(self):
pass
class AsyncoreLoop(object):
timer_resolution = 0.1 # used as the max interval to be in the io loop before returning to service timeouts
_loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher
def __init__(self):
self._pid = os.getpid()
self._loop_lock = Lock()
self._started = False
self._shutdown = False
self._thread = None
self._timers = TimerManager()
try:
dispatcher = self._loop_dispatch_class()
dispatcher.validate()
log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
except Exception:
log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
dispatcher.close()
dispatcher = _BusyWaitDispatcher()
self._loop_dispatcher = dispatcher
def maybe_start(self):
should_start = False
did_acquire = False
try:
did_acquire = self._loop_lock.acquire(False)
if did_acquire and not self._started:
self._started = True
should_start = True
finally:
if did_acquire:
self._loop_lock.release()
if should_start:
self._thread = Thread(target=self._run_loop, name="asyncore_cassandra_driver_event_loop")
self._thread.daemon = True
self._thread.start()
def wake_loop(self):
self._loop_dispatcher.notify_loop()
def _run_loop(self):
log.debug("Starting asyncore event loop")
with self._loop_lock:
while not self._shutdown:
try:
self._loop_dispatcher.loop(self.timer_resolution)
self._timers.service_timeouts()
except Exception:
log.debug("Asyncore event loop stopped unexepectedly", exc_info=True)
break
self._started = False
log.debug("Asyncore event loop ended")
def add_timer(self, timer):
self._timers.add_timer(timer)
# This function is called from a different thread than the event loop
# thread, so for this call to be thread safe, we must wake up the loop
# in case it's stuck at a select
self.wake_loop()
def _cleanup(self):
global _dispatcher_map
self._shutdown = True
if not self._thread:
return
log.debug("Waiting for event loop thread to join...")
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
# Ensure all connections are closed and in-flight requests cancelled
for conn in tuple(_dispatcher_map.values()):
if conn is not self._loop_dispatcher:
conn.close()
self._timers.service_timeouts()
# Once all the connections are closed, close the dispatcher
self._loop_dispatcher.close()
log.debug("Dispatchers were closed")
_global_loop = None
atexit.register(partial(_cleanup, _global_loop))
class AsyncoreConnection(Connection, asyncore.dispatcher):
"""
An implementation of :class:`.Connection` that uses the ``asyncore``
module in the Python standard library for its event loop.
"""
_writable = False
_readable = False
@classmethod
def initialize_reactor(cls):
global _global_loop
if not _global_loop:
_global_loop = AsyncoreLoop()
else:
current_pid = os.getpid()
if _global_loop._pid != current_pid:
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
_global_loop = AsyncoreLoop()
@classmethod
def handle_fork(cls):
global _dispatcher_map, _global_loop
_dispatcher_map = {}
if _global_loop:
_global_loop._cleanup()
_global_loop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
_global_loop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self.deque_lock = Lock()
self._connect_socket()
# start the event loop if needed
_global_loop.maybe_start()
init_handler = WaitableTimer(
timeout=0,
callback=partial(asyncore.dispatcher.__init__,
self, self._socket, _dispatcher_map)
)
_global_loop.add_timer(init_handler)
init_handler.wait(kwargs["connect_timeout"])
self._writable = True
self._readable = True
self._send_options_message()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
self._writable = False
self._readable = False
# We don't have to wait for this to be closed, we can just schedule it
self.create_timer(0, partial(asyncore.dispatcher.close, self))
log.debug("Closed socket to %s", self.endpoint)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.endpoint))
#This happens when the connection is shutdown while waiting for the ReadyMessage
if not self.connected_event.is_set():
self.last_error = ConnectionShutdown("Connection to %s was closed" % self.endpoint)
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_error(self):
self.defunct(sys.exc_info()[1])
def handle_close(self):
log.debug("Connection %s closed by server", self)
self.close()
def handle_write(self):
while True:
with self.deque_lock:
try:
next_msg = self.deque.popleft()
except IndexError:
self._writable = False
return
try:
sent = self.send(next_msg)
self._readable = True
except socket.error as err:
if (err.args[0] in NONBLOCKING or
err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE)):
with self.deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self.deque_lock:
self.deque.appendleft(next_msg[sent:])
if sent == 0:
return
def handle_read(self):
try:
while True:
buf = self.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if isinstance(err, ssl.SSLError):
if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
if not self._iobuf.tell():
return
else:
self.defunct(err)
return
elif err.args[0] in NONBLOCKING:
if not self._iobuf.tell():
return
else:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
if not self._requests and not self.is_control_connection:
self._readable = False
def push(self, data):
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self.deque_lock:
self.deque.extend(chunks)
self._writable = True
_global_loop.wake_loop()
def writable(self):
return self._writable
def readable(self):
return self._readable or ((self.is_control_connection or self._continuous_paging_sessions) and not (self.is_defunct or self.is_closed))
|
the-stack_0_22527 | #!/usr/bin/env python
from __future__ import division
import argparse
import os.path as osp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt # NOQA
import pandas # NOQA
import seaborn # NOQA
def learning_curve(log_file):
print('==> Plotting log file: %s' % log_file)
df = pandas.read_csv(log_file)
colors = ['red', 'green', 'blue', 'purple', 'orange']
colors = seaborn.xkcd_palette(colors)
plt.figure(figsize=(20, 6), dpi=300)
row_min = df.min()
row_max = df.max()
# initialize DataFrame for train
columns = [
'epoch',
'iteration',
'train/loss',
'train/acc',
'train/acc_cls',
'train/mean_iu',
'train/fwavacc',
]
df_train = df[columns]
if hasattr(df_train, 'rolling'):
df_train = df_train.rolling(window=10).mean()
else:
df_train = pandas.rolling_mean(df_train, window=10)
df_train = df_train.dropna()
iter_per_epoch = df_train[df_train['epoch'] == 1]['iteration'].values[0]
df_train['epoch_detail'] = df_train['iteration'] / iter_per_epoch
# initialize DataFrame for val
columns = [
'epoch',
'iteration',
'valid/loss',
'valid/acc',
'valid/acc_cls',
'valid/mean_iu',
'valid/fwavacc',
]
df_valid = df[columns]
df_valid = df_valid.dropna()
df_valid['epoch_detail'] = df_valid['iteration'] / iter_per_epoch
data_frames = {'train': df_train, 'valid': df_valid}
n_row = 2
n_col = 3
for i, split in enumerate(['train', 'valid']):
df_split = data_frames[split]
# loss
plt.subplot(n_row, n_col, i * n_col + 1)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.plot(df_split['epoch_detail'], df_split['%s/loss' % split], '-',
markersize=1, color=colors[0], alpha=.5,
label='%s loss' % split)
plt.xlim((0, row_max['epoch']))
plt.ylim((min(row_min['train/loss'], row_min['valid/loss']),
max(row_max['train/loss'], row_max['valid/loss'])))
plt.xlabel('epoch')
plt.ylabel('%s loss' % split)
# loss (log)
plt.subplot(n_row, n_col, i * n_col + 2)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.semilogy(df_split['epoch_detail'], df_split['%s/loss' % split],
'-', markersize=1, color=colors[0], alpha=.5,
label='%s loss' % split)
plt.xlim((0, row_max['epoch']))
plt.ylim((min(row_min['train/loss'], row_min['valid/loss']),
max(row_max['train/loss'], row_max['valid/loss'])))
plt.xlabel('epoch')
plt.ylabel('%s loss (log)' % split)
# lbl accuracy
plt.subplot(n_row, n_col, i * n_col + 3)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.plot(df_split['epoch_detail'], df_split['%s/acc' % split],
'-', markersize=1, color=colors[1], alpha=.5,
label='%s accuracy' % split)
plt.plot(df_split['epoch_detail'], df_split['%s/acc_cls' % split],
'-', markersize=1, color=colors[2], alpha=.5,
label='%s accuracy class' % split)
plt.plot(df_split['epoch_detail'], df_split['%s/mean_iu' % split],
'-', markersize=1, color=colors[3], alpha=.5,
label='%s mean IU' % split)
plt.plot(df_split['epoch_detail'], df_split['%s/fwavacc' % split],
'-', markersize=1, color=colors[4], alpha=.5,
label='%s fwav accuracy' % split)
plt.legend()
plt.xlim((0, row_max['epoch']))
plt.ylim((0, 1))
plt.xlabel('epoch')
plt.ylabel('%s label accuracy' % split)
out_file = osp.splitext(log_file)[0] + '.png'
plt.savefig(out_file)
print('==> Wrote figure to: %s' % out_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('log_file')
args = parser.parse_args()
log_file = args.log_file
learning_curve(log_file)
if __name__ == '__main__':
main()
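# Illustrative invocation (the script and log filenames are hypothetical):
#   python learning_curve.py logs/fcn32s/log.csv
# reads the CSV written by the training loop (it must contain the epoch/iteration,
# train/* and valid/* columns selected above) and writes logs/fcn32s/log.png next
# to the input file.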
|
the-stack_0_22528 | import gym
from gym import wrappers
import random
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
# import keras
# from keras.layers.core import input_data, dropout, fully_connected
# from keras.layers.estimator import regression
from statistics import mean, median
from collections import Counter
LR = 1e-3 # learning rate
env = gym.make('CartPole-v0').env # environment
goal_steps = 500
# The score requirement threshold is the top 10% of all scores.
score_requirement = 60  # only learn from game runs that score at least 60
initial_games = 50000
# Let's start off by defining some random game runs
print('random_games')
def random_games_for_testing(isThereAModel=0, numberOfEpisodes=5, render=0):
scores = []
choices = []
for episode in range(numberOfEpisodes):
score = 0
game_memory = []
prev_obs = []
env.reset()
for t in range(goal_steps):
if render == 1:
env.render() # to see what is going on in game, remove for faster code
if len(prev_obs) > 0 and isThereAModel == 1:
action = np.argmax(model.predict(prev_obs.reshape(-1, len(prev_obs), 1))[0])
else:
action = env.action_space.sample() # this generates random actions in any environment,
# it's sometimes a good place to start
# A lot of times what can happen is that your neural network will converge towards
# once thing. In order to understand what is going on, we are going to save all
# the choices taken by the agent in case we want understand what the ratio our
# neural network is predicting at. This could help with debugging.
choices.append(action)
# Let's save the information we get from each step in the game.
# Observation can be, in many games, the pixel data, but
# in CartPole-v0 it is pull position, cart position, etc.
# Reward will give us a 1 or 0 based on whether the CartPole
# was balanced (1) or not (0).
# Done tells you whether the game run is over or not.
# Info contains any other info provided. You can usually use
# this during debugging or fine-tuning.
observation, reward, done, info = env.step(action)
prev_obs = observation
# game_memory is important if we want to retrain our neural network. Our model
# will get stronger as we retrain.
game_memory.append([observation, action])
score += reward
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
scores.append(score)
print('Average Score', sum(scores) / len(scores))
print('Choice 1: {}, Choice 0: {}'.format(choices.count(1) / len(choices),
choices.count(0) / len(choices)))
# random_games_for_testing()
print('init_pop')
def initial_population():
# Training data contains observation and move made, the moves will all be random.
    # We will only append a run's data to training_data if its score is at
    # least score_requirement (60).
training_data = []
scores = []
accepted_scores = []
env.reset()
for _ in range(initial_games):
score = 0
# We will store all the movements and such in game_memory because we will
# only know at the end of the game run whether we beat the score requirement.
# Then, we can place the accepted runs in training_data and accepted_scores.
game_memory = []
prev_observation = []
# The following loop runs 1 game of CartPool-v0
for _ in range(goal_steps):
action = env.action_space.sample() # replace with env.action_space.sample()
observation, reward, done, info = env.step(action)
# Here we will basically connect the action taken with the previous observation
# instead of the current observation (like in the following commented line).
# game_memory.append([observation, action])
if len(prev_observation) > 0:
game_memory.append([prev_observation, action])
prev_observation = observation
score += reward # score only increases by 1 if CartPole is balanced
if done:
break
# We can now save the score in training_data and accepted_scores that passed
# the score_requirement threshold for 1 particular game run.
if score >= score_requirement:
accepted_scores.append(score)
for data in game_memory:
if data[1] == 1:
output = [0,1]
elif data[1] == 0:
output = [1,0]
training_data.append([data[0], output])
env.reset()
scores.append(score)
training_data_save = np.array(training_data)
np.save('saved.npy', training_data_save)
print('Average accepted score:', mean(accepted_scores))
print('Median accepted score:', median(accepted_scores))
print(Counter(accepted_scores))
print(len(training_data))
return training_data
initial_population()
# The following is the neural network using a tflearn implementation.
# In TensorFlow, we can save models and use them later. To use a saved model, we actually
# need to have a model already defined. We also need to make sure that our saved model
# and the model we've defined are the same input size. Therefore, the neural_network_model
# function has an input that describes the size of our models.
# Generally, we want to seperate out the model, the training of the model and usage of the model.
print('NN')
def neural_network_model(input_size):
network = input_data(shape=[None, input_size, 1], name='input')
# fully_connected(Incoming, number of units, activation function)
# dropout(Incoming, keep_prob : A float representing the probability that each element is kept.)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
# output layer
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')
return model
print('train_NN')
def train_model(training_data, model=False):
X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]), 1)
y = np.array([i[1] for i in training_data])
# if we don't already have a model
if not model:
model = neural_network_model(input_size=len(X[0]))
    # we keep the number of epochs small (n_epoch=2 here) since too many epochs can lead to overfitting.
# I will further comment if I decide to change the value based on results.
model.fit({'input':X}, {'targets':y}, n_epoch=2, snapshot_step=500, show_metric=True,
run_id='CartPole-v0-tflearn')
return model
print('training_data save')
training_data = initial_population()
print('model')
model = train_model(training_data)
print('final run')
# env = wrappers.Monitor(env, '/tmp/cartpole-experiment-1', force=True)
random_games_for_testing(isThereAModel=1, numberOfEpisodes=10, render=0)
model.save('test4.tflearn')
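# Sketch of a later session reusing the saved weights (not part of the original run;
# input_size=4 is assumed because a CartPole observation has 4 values, matching len(X[0])):
#   model = neural_network_model(input_size=4)
#   model.load('test4.tflearn')
#   random_games_for_testing(isThereAModel=1, numberOfEpisodes=10, render=1)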
|
the-stack_0_22534 | import json
import configparser
import logging
from .gpt_plugin import GPTPlugin
from .gpt_utils import join_url
logger = logging.getLogger(__name__)
class Clockify(GPTPlugin):
name = "clockify"
url = "https://api.clockify.me"
def __init__(self, gpt):
super().__init__(gpt)
def setup(self):
try:
self.session.update({"token": self.gpt.gptconfig_get(self.name, "token")})
except configparser.NoSectionError as e:
#logger.error(str(e))
self.gpt.gptconfig_set_section(self.name)
self.add_parse_args(kind="setup-args")
except configparser.NoOptionError as e:
#logger.error(str(e))
self.add_parse_args(kind="setup-args")
params = self.gpt.gptparse_params()
self.session.update({"token": params.clockify_token})
try:
if self.auth():
self.gpt.gptconfig_set(self.name, "token", self.token())
print(f"{self.name} now can do you use.")
except Exception as e:
logger.error(str(e))
exit(0)
token = lambda self: self.session.get("token", "")
http_headers = lambda self: {'X-Api-Key': self.token()}
def add_parse_args(self, kind):
if kind == "setup-args":
self.gpt.parse.add_argument('--clockify-token',
action='store',
dest='clockify_token',
help=' e.g XtGTMKadTS8sJ/E',
required=True
)
else:
# Overwrite
self.gpt.parse.add_argument('--clockify-workspaces',
action='store_const',
dest='clockify_workspaces',
help='List clockify workspaces',
const=True,
)
self.gpt.parse.add_argument('--clockify-projects',
action='store_const',
dest='clockify_projects',
help='List clockify projects',
const=True,
)
def auth(self):
url = join_url(self.url, "/api/v1/user")
try:
data = self.http_call('GET', url, headers= self.http_headers())
return data.get("id") != ""
except Exception as e :
msg =""
try:
err = json.loads(str(e))
msg = err.get("message", "Fail Auth")
except Exception as jl:
pass
raise Exception(msg)
return False
def cli(self):
# Overwrite
params = self.gpt.gptparse_params()
def findbyid(rows, id):
for row in rows:
for k in row.keys():
if k == 'id' and row.get(k) == id:
return row
return None
def onlycolumns(rows):
l = []
for r in rows:
l.append( { 'id': r.get('id'), 'name': r.get('name')})
return l
if params.clockify_workspaces:
try:
rows = self.workspaces()
if rows:
rows = onlycolumns(rows)
title ="Clockify workspaces"
if params.set:
row = findbyid(rows, params.set)
if row:
self.gpt.gptconfig_set(self.name, "workspace_id",row.get('id') )
self.gpt.gptconfig_set(self.name, "workspace_name",row.get('name') )
self.gpt.print_cli([], title= 'the workspace was added successfully')
else:
self.gpt.print_cli([], title= 'the workspace id was not found')
else:
self.gpt.print_cli(rows, title=title)
else:
raise Exception("Fail get workspaces")
except Exception as e:
self.gpt.exit(e)
elif params.clockify_projects:
try:
workspace_id = self.gpt.gptconfig_get(self.name, "workspace_id")
except Exception as e:
#logger.error(e)
workspace = self.workspaces(filter='first')
workspace_id = workspace.get('id')
try:
rows = self.projects(workspace_id)
if rows:
rows = onlycolumns(rows)
title ="Clockify projects"
if params.set:
row = findbyid(rows, params.set)
if row:
self.gpt.gptconfig_set(self.name, "project_id",row.get('id') )
self.gpt.gptconfig_set(self.name, "project_name",row.get('name') )
self.gpt.print_cli([], title= 'the project was added successfully')
else:
self.gpt.print_cli([], title= 'the project id was not found')
else:
self.gpt.print_cli(rows, title=title)
else:
raise Exception("Fail get projects")
except Exception as e:
raise Exception(e)
def workspaces(self, filter=""):
url = join_url(self.url, "/api/v1/workspaces")
try:
data = self.http_call('GET', url, headers = self.http_headers())
if filter =='first':
if data:
return len(data) and data[0]
return data
except:
pass
return None
def projects(self, workspace_id, filter=""):
url =join_url(self.url,f"/api/v1/workspaces/{workspace_id}/projects")
try:
data = self.http_call('GET', url, headers = self.http_headers())
if filter =='first' :
return len(data) and data[0]
return data
except:
pass
return None
def add_time_entry(self, **kwargs ):
# Overwrite
description = kwargs.get('name')
start= kwargs.get('start')
end= kwargs.get('end')
workspace_id = ""
try:
workspace_id = self.gpt.gptconfig_get(self.name, "workspace_id")
except:
try:
workspace = self.workspaces(filter='first')
workspace_id = workspace.get('id')
except:
pass
project_id = None
try:
project_id = self.gpt.gptconfig_get(self.name, "project_id")
except:
pass
time_entry = {
"start": start, # Required
"description": description,
"projectId": project_id,
"end": end, # Required
}
try:
url = join_url(self.url, f"api/v1/workspaces/{workspace_id}/time-entries")
data = self.http_call('POST', url,json= time_entry, headers= self.http_headers())
return data["id"]
except Exception as e:
pass
return -1
def status(self, **kwargs):
# Overwrite
items = []
def getstate(param):
try:
id = self.gpt.gptconfig_get(self.name, param+"_id")
name =self.gpt.gptconfig_get(self.name, param+"_name")
if len(id) and len(name):
items.append({'name': "%s: %s - %s " % (str(param).title(), id, name)})
except:
pass
getstate('workspace')
getstate('project')
self.gpt.print_cli(items)
|
the-stack_0_22537 | # -*- coding: utf-8 -*-
import io
import os
import sys
import json
import yaml
import fnmatch
import nbformat
import requests
import warnings
import entrypoints
from contextlib import contextmanager
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential
from . import __version__
from .log import logger
from .utils import chdir
from .exceptions import (
PapermillException,
PapermillRateLimitException,
missing_dependency_generator,
missing_environment_variable_generator,
)
try:
from .s3 import S3
except ImportError:
S3 = missing_dependency_generator("boto3", "s3")
try:
from .adl import ADL
except ImportError:
ADL = missing_dependency_generator("azure.datalake.store", "azure")
except KeyError as exc:
if exc.args[0] == "APPDATA":
ADL = missing_environment_variable_generator("azure.datalake.store", "APPDATA")
else:
raise
try:
from .abs import AzureBlobStore
except ImportError:
AzureBlobStore = missing_dependency_generator("azure.storage.blob", "azure")
try:
from gcsfs import GCSFileSystem
except ImportError:
GCSFileSystem = missing_dependency_generator("gcsfs", "gcs")
try:
from pyarrow import HadoopFileSystem
except ImportError:
HadoopFileSystem = missing_dependency_generator("pyarrow", "hdfs")
def fallback_gs_is_retriable(e):
try:
print(e.code)
return e.code is None or e.code == 429
except AttributeError:
print(e)
return False
try:
# Default to gcsfs library's retry logic
from gcsfs.utils import is_retriable as gs_is_retriable
except ImportError:
gs_is_retriable = fallback_gs_is_retriable
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class PapermillIO(object):
'''
The holder which houses any io system registered with the system.
This object is used in a singleton manner to save and load particular
named Handler objects for reference externally.
'''
def __init__(self):
self.reset()
def read(self, path, extensions=['.ipynb', '.json']):
if path == '-':
return sys.stdin.read()
if not fnmatch.fnmatch(os.path.basename(path).split('?')[0], '*.*'):
warnings.warn(
"the file is not specified with any extension : " + os.path.basename(path)
)
elif not any(
fnmatch.fnmatch(os.path.basename(path).split('?')[0], '*' + ext) for ext in extensions
):
warnings.warn(
"The specified input file ({}) does not end in one of {}".format(path, extensions)
)
# Handle https://github.com/nteract/papermill/issues/317
notebook_metadata = self.get_handler(path).read(path)
if isinstance(notebook_metadata, (bytes, bytearray)):
return notebook_metadata.decode('utf-8')
return notebook_metadata
def write(self, buf, path, extensions=['.ipynb', '.json']):
if path == '-':
try:
return sys.stdout.buffer.write(buf.encode('utf-8'))
except AttributeError:
# Originally required by https://github.com/nteract/papermill/issues/420
# Support Buffer.io objects
return sys.stdout.write(buf.encode('utf-8'))
if not fnmatch.fnmatch(os.path.basename(path).split('?')[0], '*.*'):
warnings.warn(
"the file is not specified with any extension : " + os.path.basename(path)
)
elif not any(
fnmatch.fnmatch(os.path.basename(path).split('?')[0], '*' + ext) for ext in extensions
):
warnings.warn(
"The specified input file ({}) does not end in one of {}".format(path, extensions)
)
return self.get_handler(path).write(buf, path)
def listdir(self, path):
return self.get_handler(path).listdir(path)
def pretty_path(self, path):
return self.get_handler(path).pretty_path(path)
def reset(self):
self._handlers = []
def register(self, scheme, handler):
# Keep these ordered as LIFO
self._handlers.insert(0, (scheme, handler))
def register_entry_points(self):
# Load handlers provided by other packages
for entrypoint in entrypoints.get_group_all("papermill.io"):
self.register(entrypoint.name, entrypoint.load())
def get_handler(self, path):
local_handler = None
for scheme, handler in self._handlers:
if scheme == 'local':
local_handler = handler
if path.startswith(scheme):
return handler
if local_handler is None:
raise PapermillException(
"Could not find a registered schema handler for: {}".format(path)
)
return local_handler
class HttpHandler(object):
@classmethod
def read(cls, path):
return requests.get(path, headers={'Accept': 'application/json'}).text
@classmethod
def listdir(cls, path):
raise PapermillException('listdir is not supported by HttpHandler')
@classmethod
def write(cls, buf, path):
result = requests.put(path, json=json.loads(buf))
result.raise_for_status()
@classmethod
def pretty_path(cls, path):
return path
class LocalHandler(object):
def __init__(self):
self._cwd = None
def read(self, path):
try:
with chdir(self._cwd):
with io.open(path, 'r', encoding="utf-8") as f:
return f.read()
except IOError as e:
try:
# Check if path could be a notebook passed in as a
# string
json.loads(path)
return path
except ValueError:
# Propagate the IOError
raise e
def listdir(self, path):
with chdir(self._cwd):
return [os.path.join(path, fn) for fn in os.listdir(path)]
def write(self, buf, path):
with chdir(self._cwd):
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
raise FileNotFoundError("output folder {} doesn't exist.".format(dirname))
with io.open(path, 'w', encoding="utf-8") as f:
f.write(buf)
def pretty_path(self, path):
return path
def cwd(self, new_path):
'''Sets the cwd during reads and writes'''
old_cwd = self._cwd
self._cwd = new_path
return old_cwd
class S3Handler(object):
@classmethod
def read(cls, path):
return "\n".join(S3().read(path))
@classmethod
def listdir(cls, path):
return S3().listdir(path)
@classmethod
def write(cls, buf, path):
return S3().cp_string(buf, path)
@classmethod
def pretty_path(cls, path):
return path
class ADLHandler(object):
def __init__(self):
self._client = None
def _get_client(self):
if self._client is None:
self._client = ADL()
return self._client
def read(self, path):
lines = self._get_client().read(path)
return "\n".join(lines)
def listdir(self, path):
return self._get_client().listdir(path)
def write(self, buf, path):
return self._get_client().write(buf, path)
def pretty_path(self, path):
return path
class ABSHandler(object):
def __init__(self):
self._client = None
def _get_client(self):
if self._client is None:
self._client = AzureBlobStore()
return self._client
def read(self, path):
lines = self._get_client().read(path)
return "\n".join(lines)
def listdir(self, path):
return self._get_client().listdir(path)
def write(self, buf, path):
return self._get_client().write(buf, path)
def pretty_path(self, path):
return path
class GCSHandler(object):
RATE_LIMIT_RETRIES = 3
RETRY_DELAY = 1
RETRY_MULTIPLIER = 1
RETRY_MAX_DELAY = 4
def __init__(self):
self._client = None
def _get_client(self):
if self._client is None:
self._client = GCSFileSystem()
return self._client
def read(self, path):
with self._get_client().open(path) as f:
return f.read()
def listdir(self, path):
return self._get_client().ls(path)
def write(self, buf, path):
# Wrapped so we can mock retry options during testing
@retry(
retry=retry_if_exception_type(PapermillRateLimitException),
stop=stop_after_attempt(self.RATE_LIMIT_RETRIES),
wait=wait_exponential(
multiplier=self.RETRY_MULTIPLIER, min=self.RETRY_DELAY, max=self.RETRY_MAX_DELAY
),
reraise=True,
)
def retry_write():
try:
with self._get_client().open(path, 'w') as f:
return f.write(buf)
except Exception as e:
try:
message = e.message
except AttributeError:
message = "Generic exception {} raised".format(type(e))
if gs_is_retriable(e):
raise PapermillRateLimitException(message)
# Reraise the original exception without retries
raise
return retry_write()
def pretty_path(self, path):
return path
class HDFSHandler(object):
def __init__(self):
self._client = None
def _get_client(self):
if self._client is None:
self._client = HadoopFileSystem()
return self._client
def read(self, path):
with self._get_client().open(path, 'rb') as f:
return f.read()
def listdir(self, path):
return self._get_client().ls(path)
def write(self, buf, path):
with self._get_client().open(path, 'wb') as f:
return f.write(str.encode(buf))
def pretty_path(self, path):
return path
# Hack to make YAML loader not auto-convert datetimes
# https://stackoverflow.com/a/52312810
class NoDatesSafeLoader(yaml.SafeLoader):
yaml_implicit_resolvers = {
k: [r for r in v if r[0] != 'tag:yaml.org,2002:timestamp']
for k, v in yaml.SafeLoader.yaml_implicit_resolvers.items()
}
# Instantiate a PapermillIO instance and register Handlers.
papermill_io = PapermillIO()
papermill_io.register("local", LocalHandler())
papermill_io.register("s3://", S3Handler)
papermill_io.register("adl://", ADLHandler())
papermill_io.register("abs://", ABSHandler())
papermill_io.register("http://", HttpHandler)
papermill_io.register("https://", HttpHandler)
papermill_io.register("gs://", GCSHandler())
papermill_io.register("hdfs://", HDFSHandler())
papermill_io.register_entry_points()
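# Rough sketch of how a custom scheme could plug into the same registry (the handler
# below is hypothetical, not part of papermill):
#
#   class InMemoryHandler(object):
#       _store = {}
#       def read(self, path): return self._store[path]
#       def write(self, buf, path): self._store[path] = buf
#       def listdir(self, path): return list(self._store)
#       def pretty_path(self, path): return path
#
#   papermill_io.register("mem://", InMemoryHandler())
#
# get_handler() matches handlers by path prefix and, because register() inserts at the
# front of the list, the most recently registered handler for a scheme wins.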
def read_yaml_file(path):
"""Reads a YAML file from the location specified at 'path'."""
return yaml.load(papermill_io.read(path, ['.json', '.yaml', '.yml']), Loader=NoDatesSafeLoader)
def write_ipynb(nb, path):
"""Saves a notebook object to the specified path.
Args:
nb_node (nbformat.NotebookNode): Notebook object to save.
notebook_path (str): Path to save the notebook object to.
"""
papermill_io.write(nbformat.writes(nb), path)
def load_notebook_node(notebook_path):
"""Returns a notebook object with papermill metadata loaded from the specified path.
Args:
notebook_path (str): Path to the notebook file.
Returns:
nbformat.NotebookNode
"""
nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)
if not hasattr(nb.metadata, 'papermill'):
nb.metadata['papermill'] = {
'parameters': dict(),
'environment_variables': dict(),
'version': __version__,
}
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = [] # Create tags attr if one doesn't exist.
if not hasattr(cell.metadata, 'papermill'):
cell.metadata['papermill'] = dict()
return nb
def list_notebook_files(path):
"""Returns a list of all the notebook files in a directory."""
return [p for p in papermill_io.listdir(path) if p.endswith('.ipynb')]
def get_pretty_path(path):
return papermill_io.pretty_path(path)
@contextmanager
def local_file_io_cwd(path=None):
try:
local_handler = papermill_io.get_handler("local")
except PapermillException:
logger.warning("No local file handler detected")
else:
try:
old_cwd = local_handler.cwd(path or os.getcwd())
except AttributeError:
logger.warning("Local file handler does not support cwd assignment")
else:
try:
yield
finally:
local_handler.cwd(old_cwd)
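# Usage sketch for the module-level helpers above (paths are illustrative):
#   with local_file_io_cwd("/tmp/notebooks"):
#       nb = load_notebook_node("input.ipynb")
#       write_ipynb(nb, "output.ipynb")
# Inside the block, relative local paths resolve against /tmp/notebooks; the local
# handler's previous working directory is restored on exit.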
|
the-stack_0_22538 | from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock, patch
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_PRESET_MODE,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, STATE_UNAVAILABLE
from custom_components.goldair_climate.geco_heater.climate import GoldairGECOHeater
from custom_components.goldair_climate.geco_heater.const import (
ATTR_ERROR,
ATTR_TARGET_TEMPERATURE,
HVAC_MODE_TO_DPS_MODE,
PROPERTY_TO_DPS_ID,
)
from ..const import GECO_HEATER_PAYLOAD
from ..helpers import assert_device_properties_set
class TestGoldairGECOHeater(IsolatedAsyncioTestCase):
def setUp(self):
device_patcher = patch(
"custom_components.goldair_climate.device.GoldairTuyaDevice"
)
self.addCleanup(device_patcher.stop)
self.mock_device = device_patcher.start()
self.subject = GoldairGECOHeater(self.mock_device())
self.dps = GECO_HEATER_PAYLOAD.copy()
self.subject._device.get_property.side_effect = lambda id: self.dps[id]
def test_supported_features(self):
self.assertEqual(
self.subject.supported_features, SUPPORT_TARGET_TEMPERATURE,
)
def test_should_poll(self):
self.assertTrue(self.subject.should_poll)
def test_name_returns_device_name(self):
self.assertEqual(self.subject.name, self.subject._device.name)
def test_unique_id_returns_device_unique_id(self):
self.assertEqual(self.subject.unique_id, self.subject._device.unique_id)
def test_device_info_returns_device_info_from_device(self):
self.assertEqual(self.subject.device_info, self.subject._device.device_info)
def test_icon(self):
self.dps[PROPERTY_TO_DPS_ID[ATTR_HVAC_MODE]] = True
self.assertEqual(self.subject.icon, "mdi:radiator")
self.dps[PROPERTY_TO_DPS_ID[ATTR_HVAC_MODE]] = False
self.assertEqual(self.subject.icon, "mdi:radiator-disabled")
def test_temperature_unit_returns_device_temperature_unit(self):
self.assertEqual(
self.subject.temperature_unit, self.subject._device.temperature_unit
)
def test_target_temperature(self):
self.dps[PROPERTY_TO_DPS_ID[ATTR_TARGET_TEMPERATURE]] = 25
self.assertEqual(self.subject.target_temperature, 25)
def test_target_temperature_step(self):
self.assertEqual(self.subject.target_temperature_step, 1)
def test_minimum_target_temperature(self):
self.assertEqual(self.subject.min_temp, 15)
def test_maximum_target_temperature(self):
self.assertEqual(self.subject.max_temp, 35)
async def test_legacy_set_temperature_method(self):
async with assert_device_properties_set(
self.subject._device, {PROPERTY_TO_DPS_ID[ATTR_TARGET_TEMPERATURE]: 25}
):
await self.subject.async_set_temperature(temperature=25)
async def test_legacy_set_temperature_does_nothing_without_temperature_value(self):
await self.subject.async_set_temperature(something="else")
self.subject._device.async_set_property.assert_not_called()
async def test_set_target_temperature_succeeds_within_valid_range(self):
async with assert_device_properties_set(
self.subject._device, {PROPERTY_TO_DPS_ID[ATTR_TARGET_TEMPERATURE]: 25}
):
await self.subject.async_set_target_temperature(25)
async def test_set_target_temperature_rounds_value_to_closest_integer(self):
async with assert_device_properties_set(
self.subject._device, {PROPERTY_TO_DPS_ID[ATTR_TARGET_TEMPERATURE]: 25},
):
await self.subject.async_set_target_temperature(24.6)
async def test_set_target_temperature_fails_outside_valid_range(self):
with self.assertRaisesRegex(
ValueError, "Target temperature \\(14\\) must be between 15 and 35"
):
await self.subject.async_set_target_temperature(14)
with self.assertRaisesRegex(
ValueError, "Target temperature \\(36\\) must be between 15 and 35"
):
await self.subject.async_set_target_temperature(36)
def test_current_temperature(self):
self.dps[PROPERTY_TO_DPS_ID[ATTR_TEMPERATURE]] = 25
self.assertEqual(self.subject.current_temperature, 25)
def test_hvac_mode(self):
self.dps[PROPERTY_TO_DPS_ID[ATTR_HVAC_MODE]] = True
self.assertEqual(self.subject.hvac_mode, HVAC_MODE_HEAT)
self.dps[PROPERTY_TO_DPS_ID[ATTR_HVAC_MODE]] = False
self.assertEqual(self.subject.hvac_mode, HVAC_MODE_OFF)
self.dps[PROPERTY_TO_DPS_ID[ATTR_HVAC_MODE]] = None
self.assertEqual(self.subject.hvac_mode, STATE_UNAVAILABLE)
def test_hvac_modes(self):
self.assertEqual(self.subject.hvac_modes, [HVAC_MODE_OFF, HVAC_MODE_HEAT])
async def test_turn_on(self):
async with assert_device_properties_set(
self.subject._device, {PROPERTY_TO_DPS_ID[ATTR_HVAC_MODE]: True}
):
await self.subject.async_set_hvac_mode(HVAC_MODE_HEAT)
async def test_turn_off(self):
async with assert_device_properties_set(
self.subject._device, {PROPERTY_TO_DPS_ID[ATTR_HVAC_MODE]: False}
):
await self.subject.async_set_hvac_mode(HVAC_MODE_OFF)
def test_error_state(self):
# There are currently no known error states; update this as they're discovered
self.dps[PROPERTY_TO_DPS_ID[ATTR_ERROR]] = "something"
self.assertEqual(
self.subject.device_state_attributes, {ATTR_ERROR: "something"}
)
async def test_update(self):
result = AsyncMock()
self.subject._device.async_refresh.return_value = result()
await self.subject.async_update()
self.subject._device.async_refresh.assert_called_once()
result.assert_awaited()
|
the-stack_0_22541 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
parser = argparse.ArgumentParser(description='Remove the coverage data from a tracefile for all files matching the pattern.')
parser.add_argument('--pattern', '-p', action='append', help='the pattern of files to remove', required=True)
parser.add_argument('tracefile', help='the tracefile to remove the coverage data from')
parser.add_argument('outfile', help='filename for the output to be written to')
args = parser.parse_args()
tracefile = args.tracefile
pattern = args.pattern
outfile = args.outfile
in_remove = False
with open(tracefile, 'r', encoding="utf8") as f:
with open(outfile, 'w', encoding="utf8") as wf:
for line in f:
for p in pattern:
if line.startswith("SF:") and p in line:
in_remove = True
if not in_remove:
wf.write(line)
if line == 'end_of_record\n':
in_remove = False
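# Example invocation (pattern and filenames are illustrative):
#   ./filter_coverage.py -p depends -p /usr/include total_coverage.info filtered.info
# Every SF: record whose path contains one of the -p patterns is dropped, up to and
# including its end_of_record line.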
|
the-stack_0_22544 | #!/usr/bin/env python3
# Send notifications based on makerware print job status (from parsing logfile)
#
# TODO: implement logging
# TODO: update instructions for USB thumbdrive and motion webcam capture
#
# DONE
# - email notification
# - use most recent logfile if none specified
# - send image from webcam with email?
# - create github repo
#
import dateutil.parser
import datetime
import json
import os
from os.path import basename
import re
import smtplib
import sys
import time
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
exc_path = os.path.dirname(sys.argv[0])
CONFIG = {}
with open(os.path.join(exc_path, 'example.json'), 'r') as jsonfile:
CONFIG = json.loads(jsonfile.read())
print(CONFIG)
MOTION_DIR = CONFIG[1]['motion']['motion_dir']
if not os.path.isdir(MOTION_DIR):
err_msg = "ERROR: Could not find MOTION directory %s" % MOTION_DIR
raise SystemExit(err_msg)
print('will send any email as', CONFIG[0]['gmail']['gmail_user'])
print('will look in motion dir', MOTION_DIR)
# http://code.activestate.com/recipes/157035-tail-f-in-python/#c4
def tail_f(file):
interval = 1.0
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
def send_notification(data):
# print data
status = data['status']
start = data['start_time']
done = data['change_time']
filename = data['filename']
duration = done - start
s = '%(duration)s job %(filename)s %(status)s' % vars()
print(s)
short = '%(status)s %(filename)s' % vars()
if done > CURRENT_TIME:
print("will send notice")
send_email(short, s)
def send_email(subj, jobinfo):
print("sending email")
email_config = CONFIG[0]['gmail']
gmail_user = email_config['gmail_user']
gmail_password = email_config['gmail_password']
to = email_config['email_to_list']
subject = subj
fromc = gmail_user
msg = MIMEMultipart()
msg['From'] = fromc
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(MIMEText(jobinfo))
# print msg
motion_dir = MOTION_DIR
dated_files = [(
os.path.getmtime(os.path.join(motion_dir, fn)),
os.path.basename(os.path.join(motion_dir, fn)),
) for fn in os.listdir(motion_dir) if fn.lower().endswith('.jpg')]
# print dated_files[-1]
dated_files.sort()
dated_files.reverse()
newest = dated_files[0][1]
newest_minus_some = dated_files[9][1]
print(newest, newest_minus_some)
filename = os.path.join(motion_dir, newest_minus_some)
files = [filename]
for f in files or []:
with open(f, "rb") as fil:
part = MIMEApplication(fil.read(), Name=basename(f))
part[
'Content-Disposition'] = 'attachment; filename="%s"' % basename(
f)
msg.attach(part)
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
if True:
server.ehlo()
server.login(gmail_user, gmail_password)
server.sendmail(fromc, to, msg.as_string())
server.close()
print('Email sent!')
except:
print('Something went wrong...')
inputFile = re.compile(r'\s*"input_file"\s*\:\s*"([^"]+)"')
# "input_file" : "/tmp/MakerBot Desktop-Oyu7m1/Deanerys_v1.gcode",
jobWatch = re.compile(r"conveyor::JobWatch")
jobWatchjobSetJobID = re.compile(r"JobWatch::setJobID")
setJobID = re.compile(r"set job ID (\d+)")
jobWatchjobChanged = re.compile(r"JobWatch::jobChanged")
jobChanged = re.compile(r"Job (\d+) (\S+)")
# void conveyor::JobWatch::setJobID(conveyor::JobID)
# 2016-03-06 18:41:38
# PrintDialog job: set job ID 0
# void conveyor::JobWatch::jobChanged(conveyor::JobID)
# 2016-03-06 19:15:17
# Job 0 canceled
# void conveyor::JobWatch::setJobID(conveyor::JobID)
# 2016-03-06 19:17:17
# PrintDialog job: set job ID 1
# void conveyor::JobWatch::jobChanged(conveyor::JobID)
# 2016-03-06 21:52:39
# Job 1 concluded
# >>> import dateutil.parser
# >>> dateutil.parser.parse("Sun Mar 6 21:52:39 2016")
# datetime.datetime(2016, 3, 6, 21, 52, 39)
line1 = ""
line2 = ""
line3 = ""
i = 0
CURRENT_TIME = datetime.datetime.now()
JOB_INFO = {}
filename = ""
# Logfiles live in ~/Things/Logs
logfile_dir = os.path.expanduser("~/Things/Logs")
if sys.argv[1:]:
logfile_name = sys.argv[1]
else:
dated_files = [(
os.path.getmtime(os.path.join(logfile_dir, fn)),
os.path.basename(os.path.join(logfile_dir, fn)),
) for fn in os.listdir(logfile_dir) if fn.lower().endswith('.log')]
# print dated_files
dated_files.sort()
dated_files.reverse()
newest = dated_files[0][1]
logfile_name = os.path.join(logfile_dir, newest)
print(logfile_name)
for line in tail_f(open(logfile_name, 'r')):
i += 1
line1 = line2
line2 = line3
line3 = line
# print i
m = jobWatch.search(line1)
if m:
# print JOB_INFO
timestamp = dateutil.parser.parse(line2)
# print line1.strip()
# print timestamp
print(line3.strip())
if jobWatchjobChanged.search(line1):
status = jobChanged.search(line3)
if status:
thisJobID = status.group(1)
thisStatus = status.group(2)
JOB_INFO[thisJobID]['change_time'] = timestamp
JOB_INFO[thisJobID]['status'] = thisStatus
JOB_INFO[thisJobID]['filename'] = filename
notify_data = JOB_INFO[thisJobID]
send_notification(notify_data)
# print status.groups()
elif jobWatchjobSetJobID.search(line1):
jobId = setJobID.search(line3)
if jobId:
JOB_INFO[jobId.group(1)] = {'start_time': timestamp}
g = inputFile.match(line1)
if g:
# print g.groups()
filename = os.path.basename(g.group(1))
# print filename
|
the-stack_0_22545 | # -*- coding: utf-8 -*-
# dcf
# ---
# A Python library for generating discounted cashflows.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.5, copyright Sunday, 21 November 2021
# Website: https://github.com/sonntagsgesicht/dcf
# License: Apache License 2.0 (see LICENSE file)
from abc import ABC
from sys import float_info
from .curve import RateCurve
from .compounding import continuous_compounding, continuous_rate
from .interpolation import constant, linear, logconstantrate, loglinearrate, neglogconstant, negloglinear
from . import dyn_scheme
class CreditCurve(RateCurve, ABC):
""" generic curve for default probabilities (under construction) """
_forward_tenor = '1Y'
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
if isinstance(domain, RateCurve):
# if argument is a curve add extra curve points to domain for better approximation
if data:
raise TypeError("If first argument is %s, data argument must not be given." % domain.__class__.__name__)
data = domain
domain = sorted(set(list(data.domain) + [max(data.domain) + '1y']))
super(CreditCurve, self).__init__(domain, data, interpolation, origin, day_count, forward_tenor)
def get_survival_prob(self, start, stop=None): # aka get_discount_factor
if stop is None:
return self.get_survival_prob(self.origin, start)
return self._get_compounding_factor(start, stop)
def get_flat_intensity(self, start, stop=None): # aka get_zero_rate
if stop is None:
return self.get_flat_intensity(self.origin, start)
return self._get_compounding_rate(start, stop)
def get_hazard_rate(self, start): # aka get_short_rate
if start < min(self.domain):
return self.get_hazard_rate(min(self.domain))
if max(self.domain) <= start:
return self.get_hazard_rate(max(self.domain) - self.__class__._time_shift)
previous = max(d for d in self.domain if d <= start)
follow = min(d for d in self.domain if start < d)
if not previous <= start <= follow:
raise AssertionError()
if not previous < follow:
raise AssertionError(list(map(str, (previous, start, follow))))
return self.get_flat_intensity(previous, follow)
class ProbabilityCurve(CreditCurve, ABC):
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
# validate probabilities
if not isinstance(data, RateCurve):
data = [max(float_info.min, min(d, 1. - float_info.min)) for d in data]
if not all(data):
raise ValueError('Found non positive survival probabilities.')
# if argument is a curve add extra curve points to domain for better approximation
if isinstance(domain, RateCurve):
if data:
raise TypeError("If first argument is %s, data argument must not be given." % domain.__class__.__name__)
data = domain
origin = data.origin if origin is None else origin
domain = sorted(set(list(data.domain) + [origin + '1d', max(data.domain) + '1y']))
super(ProbabilityCurve, self).__init__(domain, data, interpolation, origin, day_count, forward_tenor)
class SurvivalProbabilityCurve(ProbabilityCurve):
_interpolation = dyn_scheme(logconstantrate, loglinearrate, logconstantrate)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(curve.origin, x)
def _get_compounding_factor(self, start, stop):
if start is self.origin:
return self(stop)
if start == stop:
return 1. if 2*float_info.min <= self(start) else 0.
return self(stop) / self(start)
def _get_compounding_rate(self, start, stop):
if start == stop == self.origin:
# intensity proxi at origin
stop = min(d for d in self.domain if self.origin < d)
# todo: calc left extrapolation (for linear zero rate interpolation)
return super(SurvivalProbabilityCurve, self)._get_compounding_rate(start, stop)
class DefaultProbabilityCurve(SurvivalProbabilityCurve):
""" wrapper of SurvivalProbabilityCurve """
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(curve.origin, x)
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
if not isinstance(data, RateCurve):
data = [1. - d for d in data]
super(DefaultProbabilityCurve, self).__init__(domain, data, interpolation, origin, day_count, forward_tenor)
class FlatIntensityCurve(CreditCurve):
_interpolation = dyn_scheme(constant, linear, constant)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_flat_intensity(curve.origin, x)
def _get_compounding_rate(self, start, stop):
if start == stop == self.origin:
return self(self.origin)
if start is self.origin:
return self(stop)
if start == stop:
return self._get_compounding_rate(start, start + self.__class__._time_shift)
s = self(start) * self.day_count(self.origin, start)
e = self(stop) * self.day_count(self.origin, stop)
t = self.day_count(start, stop)
return (e - s) / t
class HazardRateCurve(CreditCurve):
_interpolation = dyn_scheme(constant, constant, constant)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_hazard_rate(x)
def _get_compounding_rate(self, start, stop):
if start == stop:
return self(start)
current = start
rate = 0.0
step = self.__class__._time_shift
while current + step < stop:
rate += self(current) * self.day_count(current, current + step)
current += step
rate += self(current) * self.day_count(current, stop)
return rate / self.day_count(start, stop)
def get_hazard_rate(self, start): # aka get_short_rate
return self(start)
class MarginalSurvivalProbabilityCurve(ProbabilityCurve):
_interpolation = dyn_scheme(neglogconstant, negloglinear, neglogconstant)
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(x, x + curve.forward_tenor)
def _get_compounding_factor(self, start, stop):
if start == stop:
return 1. if 2*float_info.min <= self(start) else 0.
current = start
df = 1.0
step = self.forward_tenor
while current + step < stop:
df *= self(current) if 2 * float_info.min <= self(current) else 0.
current += step
if 2 * float_info.min <= self(current):
r = continuous_rate(self(current), self.day_count(current, current + step))
df *= continuous_compounding(r, self.day_count(current, stop))
else:
df *= 0.
return df
def get_hazard_rate(self, start): # aka get_short_rate
if start < min(self.domain):
return self.get_hazard_rate(min(self.domain))
if max(self.domain) <= start:
return self.get_flat_intensity(max(self.domain), max(self.domain) + self.__class__._time_shift)
previous = max(d for d in self.domain if d <= start)
follow = min(d for d in self.domain if start < d)
if not previous < follow:
raise AssertionError(list(map(str, (previous, start, follow))))
if not previous <= start <= follow:
raise AssertionError(list(map(str, (previous, start, follow))))
return self.get_flat_intensity(previous, follow)
class MarginalDefaultProbabilityCurve(MarginalSurvivalProbabilityCurve):
""" wrapper of SurvivalProbabilityCurve """
@staticmethod
def _get_storage_value(curve, x):
return curve.get_survival_prob(x, x + curve.forward_tenor)
def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):
if not isinstance(data, RateCurve):
data = [1. - d for d in data]
super(MarginalDefaultProbabilityCurve, self).__init__(
domain, data, interpolation, origin, day_count, forward_tenor)
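

# Usage sketch (hedged): these curves follow the constructor signature visible
# above -- (domain, data, interpolation, origin, day_count, forward_tenor) --
# and expose get_survival_prob() via the CreditCurve base class (not shown in
# this fragment). The pillar values below are made up for illustration:
#
#     curve = SurvivalProbabilityCurve(domain=[t0, t1, t2],
#                                      data=[1.0, 0.99, 0.97])
#     p = curve.get_survival_prob(t0, t2)   # survival probability from t0 to t2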
|
the-stack_0_22546 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sensitivity', '0010_auto_20171031_1620'),
]
operations = [
migrations.AlterField(
model_name='species',
name='pictogram',
field=models.FileField(db_column='picto', upload_to='upload', max_length=512, blank=True, null=True, verbose_name='Pictogram'),
),
]
|
the-stack_0_22547 | from conductor.task_types import raw_task_types
from conductor.errors import (
ConductorError,
DuplicateTaskName,
MissingCondFile,
ParsingUnknownNameError,
TaskSyntaxError,
)
from conductor.task_types.stdlib import STDLIB_FILES
class TaskLoader:
def __init__(self):
self._tasks = None
self._current_cond_file_path = None
self._conductor_scope = self._compile_scope()
def parse_cond_file(self, cond_file_path):
"""
Parses all the tasks in a single COND file.
"""
tasks = {}
self._tasks = tasks
self._current_cond_file_path = cond_file_path
try:
with open(cond_file_path, encoding="UTF-8") as file:
code = file.read()
# pylint: disable=exec-used
exec(code, self._conductor_scope.copy())
return tasks
except ConductorError as ex:
ex.add_file_context(file_path=cond_file_path)
raise ex
except SyntaxError as ex:
syntax_err = TaskSyntaxError()
syntax_err.add_file_context(
file_path=cond_file_path,
line_number=ex.lineno,
)
raise syntax_err from ex
except NameError as ex:
name_err = ParsingUnknownNameError(error_message=str(ex))
name_err.add_file_context(file_path=cond_file_path)
raise name_err from ex
except FileNotFoundError as ex:
missing_file_err = MissingCondFile()
missing_file_err.add_file_context(file_path=cond_file_path)
raise missing_file_err from ex
finally:
self._tasks = None
self._current_cond_file_path = None
def _compile_scope(self):
scope = {}
# Create the task constructors for Conductor's foundational task types.
for raw_task_type in raw_task_types.values():
scope[raw_task_type.name] = self._wrap_task_function(
raw_task_type.load_from_cond_file
)
# We need to explicitly `compile()` the Conductor standard library
# files here to ensure that any uses of Conductor's foundational task
# types bind to the task constructors defined above.
for lib_file_path in STDLIB_FILES:
with open(lib_file_path, "r", encoding="UTF-8") as lib_file:
code = compile(lib_file.read(), str(lib_file_path), "exec")
# pylint: disable=exec-used
exec(code, scope)
return scope
def _wrap_task_function(self, task_constructor):
def shim(**kwargs):
raw_task = task_constructor(**kwargs)
raw_task["cond_file_path"] = self._current_cond_file_path
if raw_task["name"] in self._tasks:
raise DuplicateTaskName(task_name=raw_task["name"])
self._tasks[raw_task["name"]] = raw_task
return shim
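

# Usage sketch (only uses what is defined above; the COND path is hypothetical):
#
#     loader = TaskLoader()
#     tasks = loader.parse_cond_file("experiments/COND")
#     # `tasks` maps task name -> raw task dict, each with "cond_file_path" set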
|
the-stack_0_22548 | import imaplib
from bs4 import BeautifulSoup
import quopri
import pyttsx3
def LeitorNewLatter():
FROM_EMAIL = "seu email"
FROM_PWD = "sua senha"
SMTP_SERVER = "imap.gmail.com"
SMTP_PORT = 993
    # Open the IMAP connection
mail = imaplib.IMAP4_SSL(SMTP_SERVER, SMTP_PORT)
mail.login(FROM_EMAIL, FROM_PWD)
mail.select("Deschamps")
status, data = mail.search( None, "(UNSEEN)") # ALL / UNSEEN
for num in data[0].split():
status, data = mail.fetch(num, "(RFC822)")
EMAIL_MSG = data[0][1]
soup = BeautifulSoup(markup=EMAIL_MSG, features="html.parser")
news = soup.find_all("td")[0].text
utf = quopri.decodestring(news)
text = utf.decode('utf-8')
engine = pyttsx3.init()
engine.say("Bom dia Mestre!")
engine.say(text)
engine.runAndWait()
LeitorNewLatter()
|
the-stack_0_22552 | from collections import OrderedDict
import numpy as np
import torch
from torch.nn import functional as F
from gcam.backends.base import _BaseWrapper
from gcam import gcam_utils
# Changes the used method to hook into backward
ENABLE_MODULE_HOOK = False
class GradCAM(_BaseWrapper):
def __init__(self, model, target_layers=None, postprocessor=None, retain_graph=False):
"""
"Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
https://arxiv.org/pdf/1610.02391.pdf
Look at Figure 2 on page 4
"""
super(GradCAM, self).__init__(model, postprocessor=postprocessor, retain_graph=retain_graph)
self.fmap_pool = OrderedDict()
self.grad_pool = OrderedDict()
self._target_layers = target_layers
if target_layers == 'full' or target_layers == 'auto':
target_layers = gcam_utils.get_layers(self.model)
elif isinstance(target_layers, str):
target_layers = [target_layers]
self.target_layers = target_layers
def _register_hooks(self):
"""Registers the forward and backward hooks to the layers."""
def forward_hook(key):
def forward_hook_(module, input, output):
self.registered_hooks[key][0] = True
# Save featuremaps
if not isinstance(output, torch.Tensor):
print("Cannot hook layer {} because its gradients are not in tensor format".format(key))
if not ENABLE_MODULE_HOOK:
def _backward_hook(grad_out):
self.registered_hooks[key][1] = True
# Save the gradients correspond to the featuremaps
self.grad_pool[key] = grad_out.detach()
# Register backward hook directly to the output
# Handle must be removed afterwards otherwise tensor is not freed
if not self.registered_hooks[key][1]:
_backward_handle = output.register_hook(_backward_hook)
self.backward_handlers.append(_backward_handle)
self.fmap_pool[key] = output.detach()
return forward_hook_
# This backward hook method looks prettier but is currently bugged in pytorch (04/25/2020)
# Handle does not need to be removed, tensors are freed automatically
def backward_hook(key):
def backward_hook_(module, grad_in, grad_out):
self.registered_hooks[key][1] = True
# Save the gradients correspond to the featuremaps
self.grad_pool[key] = grad_out[0].detach() # TODO: Still correct with batch size > 1?
return backward_hook_
self.remove_hook(forward=True, backward=True)
for name, module in self.model.named_modules():
if self.target_layers is None or name in self.target_layers:
self.registered_hooks[name] = [False, False]
self.forward_handlers.append(module.register_forward_hook(forward_hook(name)))
if ENABLE_MODULE_HOOK:
self.backward_handlers.append(module.register_backward_hook(backward_hook(name)))
def get_registered_hooks(self):
"""Returns every hook that was able to register to a layer."""
registered_hooks = []
for layer in self.registered_hooks.keys():
if self.registered_hooks[layer][0] and self.registered_hooks[layer][1]:
registered_hooks.append(layer)
self.remove_hook(forward=True, backward=True)
if self._target_layers == 'full' or self._target_layers == 'auto':
self.target_layers = registered_hooks
return registered_hooks
def forward(self, data):
"""Calls the forward() of the base."""
self._register_hooks()
return super(GradCAM, self).forward(data)
def generate(self):
"""Generates an attention map."""
self.remove_hook(forward=True, backward=True)
attention_maps = {}
if self._target_layers == "auto":
layer, fmaps, grads = self._auto_layer_selection()
self._check_hooks(layer)
attention_map = self._generate_helper(fmaps, grads, layer).cpu().numpy()
attention_maps = {layer: attention_map}
else:
for layer in self.target_layers:
self._check_hooks(layer)
if self.registered_hooks[layer][0] and self.registered_hooks[layer][1]:
fmaps = self._find(self.fmap_pool, layer)
grads = self._find(self.grad_pool, layer)
attention_map = self._generate_helper(fmaps, grads, layer)
attention_maps[layer] = attention_map.cpu().numpy()
if not attention_maps:
raise ValueError("None of the hooks registered to the target layers")
return attention_maps
def _auto_layer_selection(self):
"""Selects the last layer from which attention maps can be generated."""
# It's ugly but it works ;)
module_names = self.layers(reverse=True)
found_valid_layer = False
counter = 0
for layer in module_names:
try:
fmaps = self._find(self.fmap_pool, layer)
grads = self._find(self.grad_pool, layer)
nonzeros = np.count_nonzero(grads.detach().cpu().numpy()) # TODO: Add except here with description, replace nonzero with sum == 0?
self._compute_grad_weights(grads)
if nonzeros == 0 or not isinstance(fmaps, torch.Tensor) or not isinstance(grads, torch.Tensor):
counter += 1
continue
print("Selected module layer: {}".format(layer))
found_valid_layer = True
break
except ValueError:
counter += 1
except RuntimeError:
counter += 1
except IndexError:
counter += 1
if not found_valid_layer:
raise ValueError("Could not find a valid layer. "
"Check if base.logits or the mask result of base._mask_output() contains only zeros. "
"Check if requires_grad flag is true for the batch input and that no torch.no_grad statements effects gcam. "
"Check if the model has any convolution layers.")
return layer, fmaps, grads
def _find(self, pool, target_layer):
"""Returns the feature maps or gradients for a specific layer."""
if target_layer in pool.keys():
return pool[target_layer]
else:
raise ValueError("Invalid layer name: {}".format(target_layer))
def _compute_grad_weights(self, grads):
"""Computes the weights based on the gradients by average pooling."""
if self.input_dim == 2:
return F.adaptive_avg_pool2d(grads, 1)
else:
return F.adaptive_avg_pool3d(grads, 1)
def _generate_helper(self, fmaps, grads, layer):
weights = self._compute_grad_weights(grads)
attention_map = torch.mul(fmaps, weights)
B, _, *data_shape = attention_map.shape
try:
attention_map = attention_map.view(B, self.output_channels, -1, *data_shape)
except RuntimeError:
raise RuntimeError("Number of set channels ({}) is not a multiple of the feature map channels ({}) in layer: {}".format(self.output_channels, fmaps.shape[1], layer))
attention_map = torch.sum(attention_map, dim=2)
attention_map = F.relu(attention_map)
attention_map = self._normalize_per_channel(attention_map)
return attention_map
def _check_hooks(self, layer):
"""Checks if all hooks registered."""
if not self.registered_hooks[layer][0] and not self.registered_hooks[layer][1]:
raise ValueError("Neither forward hook nor backward hook did register to layer: " + str(layer))
elif not self.registered_hooks[layer][0]:
raise ValueError("Forward hook did not register to layer: " + str(layer))
elif not self.registered_hooks[layer][1]:
raise ValueError("Backward hook did not register to layer: " + str(layer) + ", Check if the hook was registered to a layer that is skipped during backward and thus no gradients are computed")
|
the-stack_0_22554 | import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import copy
if __name__ == '__main__':
path = "../data/camseq01/label_colors.txt"
color_list = []
with open(path, 'r') as f:
for l in f.readlines():
l = l.strip().replace('\t', ' ').split(' ')
r, g, b, k = l[0], l[1], l[2], l[3]
color_list.append([int(b), int(g), int(r)])
img_list = []
for root, dirs, files in os.walk("../data/camseq01"):
for f in files:
if f.endswith('txt'):
continue
if not f.endswith("_L.png"):
continue
abs_path = os.path.join(root, f)
img_list.append(abs_path)
dest_dir = "../data/mask/"
n_img = None
n_mask = "{}"
for f in img_list:
img = cv2.imread(f)
base_name = f.split("/")[-1].replace('.png', '')
h, w, c = img.shape
for n in range(len(color_list)):
color = np.array(color_list[n], dtype=np.uint8)
mask = copy.deepcopy(img)
for i in range(h):
for j in range(w):
if not ((mask[i, j, :] == color).all()):
mask[i, j, :] = np.array([0, 0, 0], dtype=np.uint8)
else:
mask[i, j, :] = np.array([255, 255, 255], dtype=np.uint8)
n_p = dest_dir + base_name + n_mask.format(n) + '.png'
cv2.imwrite(n_p, mask)
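
# Note: the per-pixel Python loop above is correct but very slow. A vectorized
# NumPy equivalent for building each binary mask (a sketch reusing the same
# img/color variables) would be:
#
#     hits = np.all(img == color, axis=-1)   # (h, w) True where pixel == color
#     mask = np.zeros_like(img)
#     mask[hits] = 255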
|
the-stack_0_22555 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 12 15:13:03 2020
@author: dmmoria
"""
### VARIABLE NAMES ###
def get_workbook_name():
NAME_WORKBOOK = 'Consequence_of_Failure_Inputs_Test.xlsx'
return NAME_WORKBOOK
def get_excel_vars():
VARS_EXCL = {
# Excel Column Names
'NAME_COLUMN_TYPE':'dist_type',
'NAME_COLUMN_DEPENDENCE':'dependence',
# Constant Value Names
'NAME_DIST_VALUE':'value',
# Uniform Distribution Names
'NAME_DIST_UNIFORM':'uniform',
'NAME_DIST_UNIFORM_MIN':'min',
'NAME_DIST_UNIFORM_MAX':'max',
# Normal Distribution Names
'NAME_DIST_NORMAL':'normal',
'NAME_DIST_NORMAL_MEAN':'mean_value',
'NAME_DIST_NORMAL_STD':'std_dev'
}
return VARS_EXCL |
the-stack_0_22557 | #!/usr/bin/env python
"""Ninja build configurator for foundation library example program"""
import sys
import os
sys.path.insert(0, os.path.join('build', 'ninja'))
import generator
project = 'example'
sources = ['main.c']
libs = ['foundation']
includepaths = ['..'] # For foundation library
libpaths = ['..'] # For foundation library
generator = generator.Generator(project = project, variables = [('bundleidentifier', 'com.rampantpixels.foundation.$(binname)')])
if generator.target.is_macosx() or generator.target.is_ios() or generator.target.is_android() or generator.target.is_tizen() or generator.target.is_pnacl():
resources = []
extrasources = []
if generator.target.is_ios():
resources = [os.path.join('ios', item) for item in [
'example.plist', 'Images.xcassets', 'example.xib'
]]
extrasources = [os.path.join('ios', item) for item in [
'viewcontroller.m'
]]
elif generator.target.is_android():
resources = [os.path.join('android', item) for item in [
'AndroidManifest.xml', os.path.join('layout', 'main.xml'), os.path.join('values', 'strings.xml'),
os.path.join('drawable-ldpi', 'icon.png'), os.path.join('drawable-mdpi', 'icon.png'), os.path.join('drawable-hdpi', 'icon.png'),
os.path.join('drawable-xhdpi', 'icon.png'), os.path.join('drawable-xxhdpi', 'icon.png'), os.path.join('drawable-xxxhdpi', 'icon.png')
]]
extrasources = [os.path.join('android', 'java', 'com', 'rampantpixels', 'foundation', 'example', item) for item in [
'ExampleActivity.java'
]]
elif generator.target.is_tizen():
resources = [os.path.join('tizen', item) for item in [
'tizen-manifest.xml', os.path.join('res', 'tizenapp.png')
]]
generator.app(module = project, sources = sources + extrasources, binname = project, libs = libs, resources = resources, includepaths = includepaths, libpaths = libpaths)
else:
generator.bin(module = project, sources = sources, binname = project, libs = libs, includepaths = includepaths, libpaths = libpaths)
|
the-stack_0_22558 | import pickle
from loguru import logger
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
def train_nn_bins(X, Y):
"""Trains the neural network with time bins. X,Y are numpy 2d-arrays.
Features:
- Onehot of Locations. A location is the tuple (line, direction, stop)
- Onhot day. A day is a number between 0-6
- Onehot time-bin. Per default the timebins are 30min long
Labels:
- Occupation: float
"""
_, d = X.shape
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(units=16, activation="relu", input_dim=d,))
model.add(tf.keras.layers.Dense(units=1))
sgd = tf.keras.optimizers.SGD(learning_rate=0.005, momentum=0.00, nesterov=False)
model.compile(
loss="mean_squared_error", optimizer=sgd, metrics=["mean_squared_error"]
)
logger.info(model.summary())
model.fit(X, Y, batch_size=128, epochs=8)
return model
def train_models():
processed_location = "data/vbz_predictions/processed"
model_location = "models/vbz_predictions"
X = pickle.load(open(f"{processed_location}/X_nn_bins.pkl", "rb"))
Y = pickle.load(open(f"{processed_location}/Y_nn_bins.pkl", "rb"))
logger.info("Training the vbz predictions NN Bins model")
model = train_nn_bins(X, Y)
model.reset_metrics()
model.save(f"{model_location}/model_nn_bins.h5")
if __name__ == "__main__":
train_models()
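
# Smoke-test sketch (hypothetical shapes; the real X/Y come from the pickled
# files loaded in train_models above):
#
#     X = np.random.randint(0, 2, size=(1024, 64)).astype("float32")
#     Y = np.random.rand(1024, 1).astype("float32")
#     model = train_nn_bins(X, Y)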
|
the-stack_0_22560 | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vectorized differentially private optimizers for TensorFlow."""
from absl import logging
import tensorflow as tf
AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
AdamOptimizer = tf.compat.v1.train.AdamOptimizer
GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP # pylint: disable=invalid-name
def make_vectorized_optimizer_class(cls):
"""Given a subclass of `tf.compat.v1.train.Optimizer`, returns a vectorized DP-SGD subclass of it.
Args:
cls: Class from which to derive a DP subclass. Should be a subclass of
`tf.compat.v1.train.Optimizer`.
Returns:
A DP-SGD subclass of `cls`.
"""
child_code = cls.compute_gradients.__code__
if child_code is not parent_code:
logging.warning(
'WARNING: Calling make_optimizer_class() on class %s that overrides '
'method compute_gradients(). Check to ensure that '
'make_optimizer_class() does not interfere with overridden version.',
cls.__name__)
class DPOptimizerClass(cls): # pylint: disable=empty-docstring
__doc__ = ("""Vectorized DP subclass of `{base_class}` using Gaussian
averaging.
You can use this as a differentially private replacement for
`{base_class}`. This optimizer implements DP-SGD using
the standard Gaussian mechanism. It differs from `{dp_class}` in that
it attempts to vectorize the gradient computation and clipping of
microbatches.
When instantiating this optimizer, you need to supply several
DP-related arguments followed by the standard arguments for
`{short_base_class}`.
Examples:
```python
# Create optimizer.
opt = {dp_vectorized_class}(l2_norm_clip=1.0, noise_multiplier=0.5, num_microbatches=1,
<standard arguments>)
```
When using the optimizer, be sure to pass in the loss as a
rank-one tensor with one entry for each example.
```python
# Compute loss as a tensor. Do not call tf.reduce_mean as you
# would with a standard optimizer.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
train_op = opt.minimize(loss, global_step=global_step)
```
""").format(
base_class='tf.compat.v1.train.' + cls.__name__,
dp_class='DP' +
cls.__name__.replace('Optimizer', 'GaussianOptimizer'),
short_base_class=cls.__name__,
dp_vectorized_class='VectorizedDP' + cls.__name__)
def __init__(
self,
l2_norm_clip,
noise_multiplier,
num_microbatches=None,
*args, # pylint: disable=keyword-arg-before-vararg, g-doc-args
**kwargs):
"""Initialize the DPOptimizerClass.
Args:
l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
noise_multiplier: Ratio of the standard deviation to the clipping norm.
num_microbatches: Number of microbatches into which each minibatch is
split. If `None`, will default to the size of the minibatch, and
per-example gradients will be computed.
*args: These will be passed on to the base class `__init__` method.
**kwargs: These will be passed on to the base class `__init__` method.
"""
super().__init__(*args, **kwargs)
self._l2_norm_clip = l2_norm_clip
self._noise_multiplier = noise_multiplier
self._num_microbatches = num_microbatches
def compute_gradients(self,
loss,
var_list,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None,
gradient_tape=None):
"""DP-SGD version of base class method."""
if callable(loss):
# TF is running in Eager mode
raise NotImplementedError('Vectorized optimizer unavailable for TF2.')
else:
# TF is running in graph mode, check we did not receive a gradient tape.
if gradient_tape:
raise ValueError('When in graph mode, a tape should not be passed.')
batch_size = tf.shape(input=loss)[0]
if self._num_microbatches is None:
self._num_microbatches = batch_size
# Note: it would be closer to the correct i.i.d. sampling of records if
# we sampled each microbatch from the appropriate binomial distribution,
# although that still wouldn't be quite correct because it would be
# sampling from the dataset without replacement.
microbatch_losses = tf.reshape(loss, [self._num_microbatches, -1])
if var_list is None:
var_list = (
tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
def process_microbatch(microbatch_loss):
"""Compute clipped grads for one microbatch."""
microbatch_loss = tf.reduce_mean(input_tensor=microbatch_loss)
grads, _ = zip(*super(DPOptimizerClass, self).compute_gradients(
microbatch_loss, var_list, gate_gradients, aggregation_method,
colocate_gradients_with_ops, grad_loss))
grads_list = [
g if g is not None else tf.zeros_like(v)
for (g, v) in zip(list(grads), var_list)
]
# Clip gradients to have L2 norm of l2_norm_clip.
# Here, we use TF primitives rather than the built-in
# tf.clip_by_global_norm() so that operations can be vectorized
# across microbatches.
grads_flat = tf.nest.flatten(grads_list)
squared_l2_norms = [
tf.reduce_sum(input_tensor=tf.square(g)) for g in grads_flat
]
global_norm = tf.sqrt(tf.add_n(squared_l2_norms))
div = tf.maximum(global_norm / self._l2_norm_clip, 1.)
clipped_flat = [g / div for g in grads_flat]
clipped_grads = tf.nest.pack_sequence_as(grads_list, clipped_flat)
return clipped_grads
clipped_grads = tf.vectorized_map(process_microbatch, microbatch_losses)
def reduce_noise_normalize_batch(stacked_grads):
summed_grads = tf.reduce_sum(input_tensor=stacked_grads, axis=0)
noise_stddev = self._l2_norm_clip * self._noise_multiplier
noise = tf.random.normal(
tf.shape(input=summed_grads), stddev=noise_stddev)
noised_grads = summed_grads + noise
return noised_grads / tf.cast(self._num_microbatches, tf.float32)
final_grads = tf.nest.map_structure(reduce_noise_normalize_batch,
clipped_grads)
return list(zip(final_grads, var_list))
return DPOptimizerClass
VectorizedDPAdagradOptimizer = make_vectorized_optimizer_class(AdagradOptimizer)
VectorizedDPAdamOptimizer = make_vectorized_optimizer_class(AdamOptimizer)
VectorizedDPSGDOptimizer = make_vectorized_optimizer_class(
GradientDescentOptimizer)
VectorizedDPAdagrad = VectorizedDPAdagradOptimizer
VectorizedDPAdam = VectorizedDPAdamOptimizer
VectorizedDPSGD = VectorizedDPSGDOptimizer
|
the-stack_0_22562 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import network
from nova.network import model as network_model
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
def image_type(image_type):
"""Converts to a three letter image type.
aki, kernel => aki
ari, ramdisk => ari
anything else => ami
"""
if image_type == 'kernel':
return 'aki'
if image_type == 'ramdisk':
return 'ari'
if image_type not in ['aki', 'ari']:
return 'ami'
return image_type
def id_to_glance_id(context, image_id):
"""Convert an internal (db) id to a glance id."""
return db.s3_image_get(context, image_id)['uuid']
def glance_id_to_id(context, glance_id):
"""Convert a glance id to an internal (db) id."""
if glance_id is None:
return
try:
return db.s3_image_get_by_uuid(context, glance_id)['id']
except exception.NotFound:
return db.s3_image_create(context, glance_id)['id']
def ec2_id_to_glance_id(context, ec2_id):
image_id = ec2_id_to_id(ec2_id)
return id_to_glance_id(context, image_id)
def glance_id_to_ec2_id(context, glance_id, image_type='ami'):
image_id = glance_id_to_id(context, glance_id)
return image_ec2_id(image_id, image_type=image_type)
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
try:
return int(ec2_id.split('-')[-1], 16)
except ValueError:
raise exception.InvalidEc2Id(ec2_id=ec2_id)
def image_ec2_id(image_id, image_type='ami'):
"""Returns image ec2_id using id and three letter type."""
template = image_type + '-%08x'
try:
return id_to_ec2_id(image_id, template=template)
except ValueError:
#TODO(wwolf): once we have ec2_id -> glance_id mapping
# in place, this wont be necessary
return "ami-00000000"
def get_ip_info_for_instance_from_nw_info(nw_info):
ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[])
for vif in nw_info:
vif_fixed_ips = vif.fixed_ips()
fixed_ips = [ip['address']
for ip in vif_fixed_ips if ip['version'] == 4]
fixed_ip6s = [ip['address']
for ip in vif_fixed_ips if ip['version'] == 6]
floating_ips = [ip['address']
for ip in vif.floating_ips()]
ip_info['fixed_ips'].extend(fixed_ips)
ip_info['fixed_ip6s'].extend(fixed_ip6s)
ip_info['floating_ips'].extend(floating_ips)
return ip_info
def get_ip_info_for_instance(context, instance):
"""Return a dictionary of IP information for an instance"""
info_cache = instance['info_cache'] or {}
cached_nwinfo = info_cache.get('network_info')
# Make sure empty response is turned into []
if not cached_nwinfo:
cached_nwinfo = []
nw_info = network_model.NetworkInfo.hydrate(cached_nwinfo)
return get_ip_info_for_instance_from_nw_info(nw_info)
def get_availability_zone_by_host(services, host):
if len(services) > 0:
return services[0]['availability_zone']
return 'unknown zone'
def id_to_ec2_id(instance_id, template='i-%08x'):
"""Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
return template % int(instance_id)
def id_to_ec2_snap_id(instance_id):
"""Convert an snapshot ID (int) to an ec2 snapshot ID
(snap-[base 16 number])"""
return id_to_ec2_id(instance_id, 'snap-%08x')
def id_to_ec2_vol_id(instance_id):
"""Convert an volume ID (int) to an ec2 volume ID (vol-[base 16 number])"""
return id_to_ec2_id(instance_id, 'vol-%08x')
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
============= =====================================================
When value is returns
============= =====================================================
zero-length ''
'None' None
'True' True case insensitive
'False' False case insensitive
'0', '-0' 0
0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
"""
if len(value) == 0:
return ''
if value == 'None':
return None
lowered_value = value.lower()
if lowered_value == 'true':
return True
if lowered_value == 'false':
return False
valueneg = value[1:] if value[0] == '-' else value
if valueneg == '0':
return 0
if valueneg == '':
return value
if valueneg[0] == '0':
if valueneg[1] in 'xX':
return int(value, 16)
elif valueneg[1] in 'bB':
return int(value, 2)
else:
try:
return int(value, 8)
except ValueError:
pass
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
try:
return complex(value)
except ValueError:
return value
def dict_from_dotted_str(items):
"""parse multi dot-separated argument into dict.
EBS boot uses multi dot-separated arguments like
BlockDeviceMapping.1.DeviceName=snap-id
Convert the above into
{'block_device_mapping': {'1': {'device_name': snap-id}}}
"""
args = {}
for key, value in items:
parts = key.split(".")
key = str(camelcase_to_underscore(parts[0]))
if isinstance(value, str) or isinstance(value, unicode):
# NOTE(vish): Automatically convert strings back
# into their respective values
value = _try_convert(value)
if len(parts) > 1:
d = args.get(key, {})
args[key] = d
for k in parts[1:-1]:
k = camelcase_to_underscore(k)
v = d.get(k, {})
d[k] = v
d = v
d[camelcase_to_underscore(parts[-1])] = value
else:
args[key] = value
return args
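

# Example (taken from the docstring above):
#
#     >>> dict_from_dotted_str([('BlockDeviceMapping.1.DeviceName', 'snap-id')])
#     {'block_device_mapping': {'1': {'device_name': 'snap-id'}}}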
|
the-stack_0_22565 | import logging
import venusian
from aiohttp import hdrs
log = logging.getLogger(__name__)
class plugin:
def __init__(self, method=None, path=None,
add_routes=False,
*args, **kwargs):
self._method = method
self._path = path
self._add_routes = add_routes
self._args = args
self._kwargs = kwargs
def __call__(self, func):
def callback(scanner, name, ob):
log.debug("name %s", name)
if self._method is not None and self._path is not None:
scanner.router.add_route(self._method, self._path, ob,
*self._args, **self._kwargs)
elif self._add_routes:
ob(scanner.app)
venusian.attach(func, callback)
return func
@classmethod
def route(cls, method, path, *args, **kwargs):
return cls(method, path, *args, **kwargs)
@classmethod
def get(cls, path, *args, **kwargs):
return cls(hdrs.METH_GET, path, *args, **kwargs)
@classmethod
def add_routes(cls):
return cls(add_routes=True)
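

# Usage sketch (hedged): venusian.Scanner attaches arbitrary keyword arguments
# as attributes, which the callbacks above read as scanner.router / scanner.app:
#
#     @plugin.get('/ping')
#     async def ping(request):
#         return web.json_response({'ok': True})   # `web` from aiohttp
#
#     scanner = venusian.Scanner(app=app, router=app.router)
#     scanner.scan(my_handlers_module)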
|
the-stack_0_22566 | # -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for renku service controllers."""
import tempfile
from contextlib import contextmanager
from urllib.parse import urlparse
from marshmallow import EXCLUDE
from renku.core import errors
from renku.core.metadata.repository import Repository
from renku.core.utils.contexts import click_context
from renku.service.serializers.cache import ProjectCloneContext
ANONYMOUS_SESSION = "anonymous"
class RemoteProject:
"""Parent controller for all controllers with remote support."""
def __init__(self, user_data, request_data):
"""Construct remote controller."""
if not user_data:
user_data = {
"owner": f"{ANONYMOUS_SESSION} session",
"name": f"{ANONYMOUS_SESSION}",
"token": f"{ANONYMOUS_SESSION}",
}
self.ctx = ProjectCloneContext().load({**user_data, **request_data}, unknown=EXCLUDE)
self.git_url = self.ctx["url_with_auth"]
self.branch = self.ctx["ref"]
@property
def remote_url(self):
"""Construct project metadata remote path."""
url = urlparse(self.git_url)
if url.scheme not in ("http", "https"):
return url
if not url.netloc:
raise ValueError("netloc unknown")
return url
@contextmanager
def remote(self):
"""Retrieve project metadata."""
with tempfile.TemporaryDirectory() as td:
try:
Repository.clone_from(self.remote_url.geturl(), td, branch=self.branch, depth=1)
except errors.GitCommandError as e:
msg = str(e)
if "is not a commit and a branch" in msg and "cannot be created from it" in msg:
raise errors.UninitializedProject(td) from e # NOTE: Project has no commits to check out
raise
with click_context(td, "remote_project"):
yield td
|
the-stack_0_22568 | # -*- coding: utf-8 -*-
import scrapy
from mall_spider.items import Product
import json
from jsonpath import jsonpath
from scrapy_redis.spiders import RedisSpider
import pickle
class JdProductSpider(RedisSpider):
name = 'jd_product'
allowed_domains = ['jd.com', '3.cn']
    # Specifies the list of start URLs: the key to read from the redis database
redis_key = 'jd_product:category'
# start_urls = ['http://jd.com/']
# def start_requests(self):
# category = {"b_category_name": "家用电器",
# "b_category_url": "https://jiadian.jd.com",
# "m_category_name": "电视",
# "m_category_url": "https://list.jd.com/list.html?cat=737,794,798",
# "s_category_name": "曲面电视",
# "s_category_url": "https://list.jd.com/list.html?cat=737,794,798&ev=4155_92263&sort=sort_rank_asc&trans=1&JL=6_0_0"
#
# }
# yield scrapy.Request(category['s_category_url'],callback=self.parse,meta={'category': category})
def make_request_from_data(self, data):
category = pickle.loads(data)
        # Build the listing-page request from the small-category URL
        # Note: use return to return a single request here; yield cannot be used
return scrapy.Request(category['s_category_url'], callback=self.parse, meta={'category': category})
def parse(self, response):
category = response.meta['category']
# print(category)
sku_ids = response.xpath('//div[contains(@class," j-sku-item")]/@data-sku').extract()
for sku_id in sku_ids:
item = Product()
item['product_category'] = category
item['product_sku_id'] = sku_id
            # Build the request for the product detail page
product_base_url = 'https://cdnware.m.jd.com/c1/skuDetail/apple/7.3.0/{}.json'.format(sku_id)
yield scrapy.Request(product_base_url, callback=self.parse_product_base, meta={'item': item})
        # Get the URL of the next page of products
next_url = response.xpath('//a[@class="pn-next"]/@href').extract_first()
if next_url:
            # Complete the relative URL
next_url = response.urljoin(next_url)
# print(next_url)
            # Build the request for the next page
yield scrapy.Request(next_url, callback=self.parse, meta={'category': category})
def parse_product_base(self, response):
        # Retrieve the data passed along with the request
item = response.meta['item']
# print(item)
# print(response.text)
        # Convert the JSON string into a dict
result = json.loads(response.text)
item['product_name'] = result['wareInfo']['basicInfo']['name']
item['product_img_url'] = result['wareInfo']['basicInfo']['wareImage'][0]['small']
item['product_book_info'] = result['wareInfo']['basicInfo']['bookInfo']
color_size = jsonpath(result, '$..colorSize')
if color_size:
color_size = color_size[0]
product_option = {}
for option in color_size:
title = option['title']
value = jsonpath(option, '$..text')
product_option[title] = value
item['product_option'] = product_option
        # Product shop info
shop = jsonpath(result, '$..shop')
if shop:
shop = shop[0]
if shop:
item['product_shop'] = {
'shop_id': shop['shopId'],
'shop_name': shop['name'],
'shop_score': shop['score']
}
else:
item['product_shop'] = {
'shop_name': '京东自营',
}
        # Product category ID
item['product_category_id'] = result['wareInfo']['basicInfo']['category']
item['product_category_id'] = item['product_category_id'].replace(';',',')
# print(item)
        # URL for the product promotion (ad) info
ad_url = 'https://cd.jd.com/promotion/v2?skuId={}&area=1_72_4137_0&cat={}'\
.format(item['product_sku_id'], item['product_category_id'])
yield scrapy.Request(ad_url, callback=self.parse_product_ad, meta={'item': item})
def parse_product_ad(self, response):
item = response.meta['item']
# print(item)
# print(response.body.decode('GBK'))
result = json.loads(response.body.decode('utf-8'))
item['product_ad'] = jsonpath(result, '$..ad')[0] if jsonpath(result, '$..ad') else ''
# print(item)
        # Build the request for review/comment info
comments_url = 'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={}'.\
format(item['product_sku_id'])
yield scrapy.Request(comments_url, callback=self.parse_product_comments,meta={'item': item})
def parse_product_comments(self, response):
item = response.meta['item']
# print(item)
# print(response.text)
result = json.loads(response.text)
item['product_comments'] = {
'CommentCount': jsonpath(result, '$..CommentCount')[0],
'GoodCount': jsonpath(result, '$..GoodCount')[0],
'PoorCount': jsonpath(result, '$..PoorCount')[0],
'GoodRate': jsonpath(result, '$..GoodRate')[0],
}
# print(item)
        # Build the price request
price_url = 'https://p.3.cn/prices/mgets?skuIds=J_{}'.format(item['product_sku_id'])
yield scrapy.Request(price_url, callback=self.parse_product_price, meta={'item': item})
def parse_product_price(self, response):
item = response.meta['item']
# print(response.text)
result = json.loads(response.text)
item['product_price'] = result[0]['p']
        # Hand the product item over to the engine
yield item
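
# Producer-side sketch (hypothetical client code, not part of the spider):
# categories are expected on the redis list named by redis_key above as pickled
# dicts containing at least 's_category_url', e.g.
#
#     redis_cli.lpush('jd_product:category',
#                     pickle.dumps({'s_category_url': 'https://list.jd.com/list.html?cat=737,794,798'}))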
|
the-stack_0_22569 | import re
import requests
from bs4 import BeautifulSoup
# All of the Byzantine Emperor names live here
url = 'https://en.wikipedia.org/w/index.php?title=List_of_Byzantine_emperors&oldid=1007718930'
# Pull the raw html from this page
with requests.Session() as s:
response = s.get(url, timeout=5).text
# Use BeautifulSoup to interpret it
soup = BeautifulSoup(response, 'html.parser')
table = soup.find_all('table')[1]
# Let's pull all the names into a dictionary of lists
names = []
greek = []
latin = []
def name_parser(wiki_text: str) -> dict:
"""
This function will try to be a catch all for the text we expect in a name cell.
Hopefully it doesn't change any time soon...
:param wiki_text: str, the text in the name cell
    :return: dict with keys name, greek, latin, alt_name, alt_greek, alt_latin
"""
# init
text, _, alt = wiki_text.lower().rstrip().partition('formally')
pattern = r'([^\(\)\,]+)\(?([^\(\)\,]+)?,?([^\(\)\,]+)?\)?'
fields = ['name', 'greek', 'latin']
alt_fields = ['alt_name', 'alt_greek', 'alt_latin']
# parse cell structure
matches = [item.strip()+"." for item in re.findall(pattern, text)[0] if item != '']
alt_matches = [item.strip()+"." for item in re.findall(pattern, alt)[0] if item != ''] if alt else []
# add to dictionary
d1, d2 = dict(zip(fields, matches)), dict(zip(alt_fields, alt_matches))
return {**d1, **d2}
greek_char_map = {
'ὴ': 'η',
'ή': 'η',
'ἡ': 'η',
'ὸ': 'ο',
'ό': 'ο',
'ὁ': 'ο',
'ἴ': 'ι',
'ἰ': 'ι',
'ί': 'ι',
'ῖ': 'ι',
'ῶ': 'ω',
'ώ': 'ω',
'ὐ': 'υ',
'ύ': 'υ',
'a': 'α',
'ἄ': 'α',
'ᾷ': 'α',
'ἀ': 'α',
'ά': 'α',
'ᾶ': 'α',
'έ': 'ε',
'ἐ': 'ε',
'b': 'β',
'ῥ': 'ρ',
'ʹ': '',
'/': '',
}
for row in table.find_all('tr'):
cells = row.find_all('td')
if len(cells) == 4:
d = name_parser(cells[1].text)
# Of course there is one exception that breaks everything
names.append(d['name'].replace('-', ' '))
temp_s = d['greek'].replace('greek: ', '').replace('[', '').replace(']', '')
for key in greek_char_map:
temp_s = temp_s.replace(key, greek_char_map[key])
greek.append(d['name'].replace('-', ' ') + temp_s)
# These may not appear in the cell
try:
latin.append(d['latin'].replace('latin: ', ''))
except KeyError:
pass
try:
names.append(d['alt_name'])
except KeyError:
pass
try:
temp_s = d['alt_greek'].replace('greek: ', '').replace('[', '').replace(']', '')
for key in greek_char_map:
temp_s = temp_s.replace(key, greek_char_map[key])
greek.append(d['name'].replace('-', ' ') + temp_s)
except KeyError:
pass
try:
latin.append(d['alt_latin'])
except KeyError:
pass
if __name__ == "__main__":
print(names)
print("*"*10)
print(greek)
print("*" * 10)
print(latin)
print("*" * 10)
s = set()
for t in greek:
s |= set(t)
print(s)
print(len(max(greek, key=len)))
|
the-stack_0_22571 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import copy
import datetime
import time
from collections import defaultdict
import numpy as np
from pycocotools import mask as maskUtils
class COCOeval:
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# HtRng - [...] A=4 object height ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple HtRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.evalImgs = defaultdict(
list
) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if cocoGt is not None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
p = self.params
if p.useCats:
gts = self.cocoGt.loadAnns(
self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts = self.cocoDt.loadAnns(
self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# convert ground truth to mask if iouType == 'segm'
if p.iouType == 'segm':
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
if p.iouType == 'keypoints':
gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(
list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.
format(p.iouType))
print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, height range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {(imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [
evaluateImg(imgId, catId, HtRng, maxDet) for catId in catIds
for HtRng in p.HtRng for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc - tic))
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt = dt[0:p.maxDets[-1]]
if p.iouType == 'segm':
g = [g['segmentation'] for g in gt]
d = [d['segmentation'] for d in dt]
elif p.iouType == 'bbox':
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
else:
raise Exception('unknown iouType for iou computation')
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
ious = maskUtils.iou(d, g, iscrowd)
return ious
def computeOks(self, imgId, catId):
p = self.params
        # dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
sigmas = p.kpt_oks_sigmas
vars = (sigmas * 2)**2
k = len(sigmas)
# compute oks between each detection and ground truth object
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt['keypoints'])
xg = g[0::3]
yg = g[1::3]
vg = g[2::3]
k1 = np.count_nonzero(vg > 0)
bb = gt['bbox']
x0 = bb[0] - bb[2]
x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]
y1 = bb[1] + bb[3] * 2
for i, dt in enumerate(dts):
d = np.array(dt['keypoints'])
xd = d[0::3]
yd = d[1::3]
if k1 > 0:
# measure the per-keypoint distance if keypoints visible
dx = xd - xg
dy = yd - yg
else:
# measure minimum distance to keypoints in (x0,y0) & (x1,y1)
z = np.zeros((k))
dx = np.max((z, x0 - xd), axis=0) + np.max(
(z, xd - x1), axis=0)
dy = np.max((z, y0 - yd), axis=0) + np.max(
(z, yd - y1), axis=0)
e = (dx**2 + dy**2) / vars / (gt['area'] + np.spacing(1)) / 2
if k1 > 0:
e = e[vg > 0]
ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
return ious
def evaluateImg(self, imgId, catId, aRng, maxDet):
'''
perform evaluation for single category and image
:return: dict (single image results)
'''
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return None
for g in gt:
box_height = g['bbox'][2]
if g['ignore'] or (box_height < aRng[0] or box_height > aRng[1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
ious = self.ious[imgId, catId][:, gtind] if len(
self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T, G))
dtm = np.zeros((T, D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T, D))
if not len(ious) == 0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t, 1 - 1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind, gind] > 0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
break
# continue to next gt unless better match made
if ious[dind, gind] < iou:
continue
# if match successful and best so far, store appropriately
iou = ious[dind, gind]
m = gind
# if match made store id of match for both dt and gt
if m == -1:
continue
dtIg[tind, dind] = gtIg[m]
dtm[tind, dind] = gt[m]['id']
gtm[tind, m] = d['id']
# set unmatched detections outside of height range to ignore
a = np.array([
d['height'] < aRng[0] or d['height'] > aRng[1] for d in dt
]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T,
0)))
# store results for given image and category
return {
'image_id': imgId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
def accumulate(self, p=None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
tic = time.time()
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.HtRng)
M = len(p.maxDets)
precision = -np.ones(
(T, R, K, A, M)) # -1 for the precision of absent categories
recall = -np.ones((T, K, A, M))
scores = -np.ones((T, R, K, A, M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.HtRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [
n for n, a in enumerate(map(lambda x: tuple(x), p.HtRng))
if a in setA
]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.HtRng)
# retrieve E at each category, height range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0 * A0 * I0
for a, a0 in enumerate(a_list):
Na = a0 * I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if e is not None]
if len(E) == 0:
continue
dtScores = np.concatenate(
[e['dtScores'][0:maxDet] for e in E])
# different sorting method generates slightly different results.
# mergesort is used to be consistent as Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate(
[e['dtMatches'][:, 0:maxDet] for e in E], axis=1
)[:, inds]
dtIg = np.concatenate(
[e['dtIgnore'][:, 0:maxDet] for e in E], axis=1
)[:, inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg == 0)
if npig == 0:
continue
tps = np.logical_and(dtm, np.logical_not(dtIg))
fps = np.logical_and(np.logical_not(dtm),
np.logical_not(dtIg))
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp + tp + np.spacing(1))
q = np.zeros((R, ))
ss = np.zeros((R, ))
if nd:
recall[t, k, a, m] = rc[-1]
else:
recall[t, k, a, m] = 0
# numpy is slow without cython optimization for accessing elements
# use python array gets significant speed improvement
pr = pr.tolist()
q = q.tolist()
for i in range(nd - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except: # noqa
pass
precision[t, :, k, a, m] = np.array(q)
scores[t, :, k, a, m] = np.array(ss)
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc - tic))
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
'''
def _summarize(ap=1, iouThr=None, HtRng='all', maxDets=100):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | height={:>6s} | maxDets={:>4d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.HtRngLbl) if aRng == HtRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
print(
iStr.format(titleStr, typeStr, iouStr, HtRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12, ))
stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1,
iouThr=.75,
maxDets=self.params.maxDets[2])
stats[3] = _summarize(1,
HtRng='small',
maxDets=self.params.maxDets[2])
stats[4] = _summarize(1,
HtRng='medium',
maxDets=self.params.maxDets[2])
stats[5] = _summarize(1,
HtRng='large',
maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0,
HtRng='small',
maxDets=self.params.maxDets[2])
stats[10] = _summarize(0,
HtRng='medium',
maxDets=self.params.maxDets[2])
stats[11] = _summarize(0,
HtRng='large',
maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10, ))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, HtRng='medium')
stats[4] = _summarize(1, maxDets=20, HtRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, HtRng='medium')
stats[9] = _summarize(0, maxDets=20, HtRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType == 'segm' or iouType == 'bbox':
summarize = _summarizeDets
elif iouType == 'keypoints':
summarize = _summarizeKps
self.stats = summarize()
def __str__(self):
self.summarize()
class Params:
'''
Params for coco evaluation api
'''
def setDetParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5,
0.95,
int(np.round((0.95 - .5) / .05)) + 1,
endpoint=True)
self.recThrs = np.linspace(.0,
1.00,
int(np.round((1.00 - .0) / .01)) + 1,
endpoint=True)
self.maxDets = [10, 100, 1000]
self.HtRng = [[10, 1e5], [10, 50], [50, 300], [300, 1e5]]
self.HtRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
def setKpParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5,
0.95,
int(np.round((0.95 - .5) / .05)) + 1,
endpoint=True)
self.recThrs = np.linspace(.0,
1.00,
int(np.round((1.00 - .0) / .01)) + 1,
endpoint=True)
self.maxDets = [20]
self.HtRng = [[10, 1e5], [50, 300], [300, 1e5]]
self.HtRngLbl = ['all', 'medium', 'large']
self.useCats = 1
self.kpt_oks_sigmas = np.array([
.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07,
.87, .87, .89, .89
]) / 10.0
def __init__(self, iouType='segm'):
if iouType == 'segm' or iouType == 'bbox':
self.setDetParams()
elif iouType == 'keypoints':
self.setKpParams()
else:
raise Exception('iouType not supported')
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None
|
the-stack_0_22574 | import torch
class SelfAttention(torch.nn.Module):
def __init__(self,
num_heads,
model_dim,
dropout_keep_prob):
super(SelfAttention, self).__init__()
self.num_heads = num_heads
self.model_dim = model_dim
self.dropout_keep_prob = dropout_keep_prob
self.q_layer = torch.nn.Linear(model_dim, model_dim * self.num_heads, bias=False)
self.out_layer = torch.nn.Linear(model_dim * self.num_heads, model_dim, bias=False)
self.out_layer2 = torch.nn.Linear(model_dim * 2, model_dim, bias=False)
self.relu = torch.nn.ReLU()
self.softmax = torch.nn.Softmax(dim=-1)
        self.dropout = torch.nn.Dropout(1 - dropout_keep_prob)
def forward(self, batched_inputs, attn_mask=None):
q = self._linear_projection(batched_inputs)
qs = self._split_heads(q)
tiled_inputs = batched_inputs.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
outputs = self._scaled_dot_product(qs, tiled_inputs, tiled_inputs, attn_mask) # (batch, num_heads, max_contexts, value_dim)
outputs = self._concat_heads(outputs) # (batch, max_contexts, value_dim * num_heads)
if self.num_heads > 1:
outputs = self.out_layer(outputs) # (batch, max_contexts, model_dim)
outputs = self.relu(outputs) # (batch, max_contexts, model_dim)
#outputs = self.dropout(outputs)
outputs = torch.cat([outputs, batched_inputs], dim=-1) # (batch, max_contexts, 2 * model_dim)
            outputs = self.out_layer2(outputs)  # (batch, max_contexts, model_dim)
outputs = self.relu(outputs) # (batch, max_contexts, model_dim)
return outputs
def _linear_projection(self, batched_inputs):
q = self.q_layer(batched_inputs) # (batch, max_contexts, key_dim * num_heads)
# k = tf.layers.dense(batched_inputs, units=self.model_dim,
# use_bias=False) # (batch, max_contexts, key_dim * num_heads)
return q
def _split_heads(self, q):
def split_last_dimension_then_transpose(tensor, num_heads, dim):
tensor = tensor.view([-1, tensor.size()[1], num_heads,
dim]) # (batch, max_contexts, num_heads, dim)
return tensor.transpose(1,2) # (batch, num_heads, max_contexts, dim)
qs = split_last_dimension_then_transpose(q, self.num_heads,
self.model_dim) # (batch, num_heads, max_contexts, key_dim)
# ks = split_last_dimension_then_transpose(k, self.num_heads,
# self.model_dim) # (batch, num_heads, max_contexts, key_dim)
return qs
def _scaled_dot_product(self, qs, ks, tiled_inputs, valid_mask):
queries_dot_keys = torch.matmul(qs, ks.transpose(2,3)) # (batch, num_heads, max_contexts, max_contexts)
scaled_scores = queries_dot_keys #/ ((self.model_dim // self.num_heads) ** 0.5) # (batch, num_heads, max_contexts, max_contexts)
if valid_mask is not None:
mask = torch.log(valid_mask.view(valid_mask.size()[0], 1, 1, valid_mask.size()[1])) # (batch, 1, 1, max_contexts)
scaled_scores += mask
attention_weights = self.softmax(scaled_scores) # (batch, num_heads, max_contexts, max_contexts)
return torch.matmul(attention_weights, tiled_inputs) # (batch, num_heads, max_contexts, value_dim)
def _concat_heads(self, outputs):
# outputs: (batch, num_heads, max_contexts, value_dim)
max_contexts = outputs.size()[2]
tensor = outputs.transpose(1, 2) # [batch, max_contexts, num_heads, value_dim]
return tensor.contiguous().view([-1, max_contexts, self.model_dim * self.num_heads])
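# Illustrative usage sketch (not part of the original module): the sizes below are
# arbitrary example values, and the all-ones mask simply marks every context
# position as valid.
if __name__ == '__main__':
    attn = SelfAttention(num_heads=2, model_dim=64, dropout_keep_prob=0.9)
    contexts = torch.randn(8, 50, 64)   # (batch, max_contexts, model_dim)
    mask = torch.ones(8, 50)            # 1.0 = valid position, 0.0 = padding
    out = attn(contexts, attn_mask=mask)
    print(out.shape)                    # expected: torch.Size([8, 50, 64])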
|
the-stack_0_22575 | import json
import logging
import os
import socks
import telegram
import youtube_dl
from dataclasses import dataclass
from functools import wraps
from telegram import MessageEntity
from urllib.request import urlopen
from telegram.ext import Updater
from telegram.ext import CommandHandler, MessageHandler, Filters
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.WARNING)
CHATS = json.loads(os.environ["CHATS"])
USERS = json.loads(os.environ["USERS"])
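# CHATS and USERS are expected to hold JSON lists of allowed chat/user ids, e.g. '[123456789]'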
def restricted(func):
@wraps(func)
def wrapped(update, context, *args, **kwargs):
if update.effective_user:
user_id = update.effective_user.id
if user_id not in USERS:
context.bot.send_message(chat_id=update.effective_chat.id, text="You are unathorized to use this bot")
print(f'Unauthorized user: {user_id}')
return
else:
chat_id = update.effective_chat.id
if chat_id not in CHATS:
print(f'Unauthorized chat: {chat_id}')
return
return func(update, context, *args, **kwargs)
return wrapped
class Error(Exception):
pass
@dataclass
class FileInfo:
filename: str
title: str
def download_audiofile(video_url):
"""Download audio for video_url and convert it into mp3 format"""
ydl_opts = {
'outtmpl': os.path.join(os.environ["MEDIADIR"], '%(title)s.%(ext)s'),
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'm4a'
}]
}
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info = ydl.extract_info(video_url, download=True)
return FileInfo(f'{os.path.splitext(ydl.prepare_filename(info))[0]}.m4a', info['title'])
except:
return None
@restricted
def bot_start(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="Welcome! Please send me a link to the video")
@restricted
def bot_download(update, context):
progress_msg = context.bot.send_message(
chat_id=update.effective_chat.id,
text="Downloading...",
disable_notification=True)
    info = None
    try:
video_url = update.effective_message.text
print(f'{download_audiofile.__name__}: url={video_url}')
info = download_audiofile(video_url)
if not info:
raise Error(f"Failed downloading [this]({video_url}) video")
size = os.path.getsize(info.filename)
if size > 50000000:
os.remove(info.filename)
raise Error(f"Can not upload an audio file for [this]({video_url}) video because it is greater than 50Mb")
try:
with open(info.filename, 'rb') as audio:
context.bot.send_audio(
chat_id=update.effective_chat.id,
caption=f"{info.title}. Here is an audio only version for [this]({video_url}) video",
audio=audio,
parse_mode=telegram.ParseMode.MARKDOWN)
except:
raise Error(f"Failed uploading an audio file for [this]({video_url}) video")
except Error as e:
context.bot.send_message(chat_id=update.effective_chat.id, text=str(e), parse_mode=telegram.ParseMode.MARKDOWN)
finally:
# Always (?) delete initial message
context.bot.delete_message(chat_id=update.effective_chat.id, message_id=update.effective_message.message_id)
# Always delete "Downloading..." message
context.bot.delete_message(chat_id=update.effective_chat.id, message_id=progress_msg.message_id)
# Delete audio file if exists
        if info is not None and info.filename and os.path.exists(info.filename):
os.remove(info.filename)
if __name__ == "__main__":
updater = Updater(token=os.environ["TOKEN"], use_context=True, request_kwargs={"proxy_url": os.environ["PROXY"]} if "PROXY" in os.environ else None)
dispatcher = updater.dispatcher
dispatcher.add_handler(MessageHandler(Filters.text & (Filters.entity(MessageEntity.URL) | Filters.entity(MessageEntity.TEXT_LINK)), bot_download))
dispatcher.add_handler(CommandHandler('start', bot_start))
updater.start_polling()
|
the-stack_0_22576 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import (
HttpResponseRedirect,
)
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET
from django.views.generic import (
RedirectView,
TemplateView,
View,
)
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import (
BaseFormView,
DeleteView,
FormView,
UpdateView,
)
from django_registration.backends.activation.views import (
ActivationView,
RegistrationView,
)
from pytz import common_timezones
from core.utils import json_response
from .forms import (
CoffeestatsRegistrationForm,
SelectTimeZoneForm,
SettingsForm,
SubmitCaffeineForm,
)
from .models import (
ACTION_TYPES,
Action,
Caffeine,
DRINK_TYPES,
User,
)
ACTIVATION_SUCCESS_MESSAGE = _('Your account has been activated successfully.')
DELETE_ACCOUNT_MESSAGE = _(
'Your account and all your caffeine submissions have been deleted.')
DELETE_CAFFEINE_SUCCESS_MESSAGE = _('Entry deleted successfully!')
EMAIL_CHANGE_SUCCESS_MESSAGE = _(
'Your email address has been changed successfully.')
EXPORT_SUCCESS_MESSAGE = _(
'Your data has been exported. You will receive an email with two CSV '
'files with your coffee and mate registrations attached.'
)
REGISTRATION_SUCCESS_MESSAGE = _('You got it.')
REGISTRATION_MAILINFO_MESSAGE = _(
'We have sent you an email with a link to activate your account')
SETTINGS_EMAIL_CHANGE_MESSAGE = _(
'We sent an email with a link that you need to open to confirm the '
'change of your email address.'
)
SELECT_TIMEZONE_SUCCESS_MESSAGE = _(
'Your time zone has been set to %(timezone)s successfully.')
SETTINGS_PASSWORD_CHANGE_SUCCESS = _('Successfully changed your password!')
SETTINGS_SUCCESS_MESSAGE = _('Successfully updated your profile information!')
SUBMIT_CAFFEINE_SUCCESS_MESSAGE = _('Your %(caffeine)s has been registered')
class AboutView(LoginRequiredMixin, TemplateView):
template_name = 'about.html'
class ExploreView(LoginRequiredMixin, TemplateView):
template_name = 'explore.html'
def get_context_data(self, **kwargs):
context_data = super(ExploreView, self).get_context_data(**kwargs)
context_data.update({
'activities': Caffeine.objects.latest_caffeine_activity(10),
'users': User.objects.random_users(4),
'topcoffee': Caffeine.objects.top_consumers_total(
DRINK_TYPES.coffee, 10),
'topcoffeeavg': Caffeine.objects.top_consumers_average(
DRINK_TYPES.coffee, 10),
'topmate': Caffeine.objects.top_consumers_total(
DRINK_TYPES.mate, 10),
'topmateavg': Caffeine.objects.top_consumers_average(
DRINK_TYPES.mate, 10),
'topcoffeerecent': Caffeine.objects.top_consumers_recent(
DRINK_TYPES.coffee, 10, interval='30 days'),
'topmaterecent': Caffeine.objects.top_consumers_recent(
DRINK_TYPES.mate, 10, interval='30 days'),
'recentlyjoined': User.objects.recently_joined(5),
'longestjoined': User.objects.longest_joined(
count=5, days=365)})
return context_data
class ExportActivityView(LoginRequiredMixin, RedirectView):
permanent = False
url = reverse_lazy('settings')
def get_redirect_url(self, *args, **kwargs):
self.request.user.export_csv()
messages.add_message(
self.request, messages.INFO,
EXPORT_SUCCESS_MESSAGE)
return super(ExportActivityView, self).get_redirect_url(
*args, **kwargs)
class DeleteAccountView(LoginRequiredMixin, DeleteView):
model = User
success_url = reverse_lazy('home')
def get_success_url(self):
logout(self.request)
messages.add_message(
self.request, messages.INFO,
DELETE_ACCOUNT_MESSAGE)
return super(DeleteAccountView, self).get_success_url()
def get_object(self, queryset=None):
return self.request.user
class ImprintView(TemplateView):
template_name = 'imprint.html'
class IndexView(TemplateView):
template_name = 'index.html'
class OverallView(TemplateView):
template_name = 'overall.html'
def get_context_data(self, **kwargs):
total = Caffeine.objects.total_caffeine()
context_data = super(OverallView, self).get_context_data(**kwargs)
context_data.update({
'coffees': total[DRINK_TYPES.coffee],
'mate': total[DRINK_TYPES.mate],
'todaydata': Caffeine.objects.hourly_caffeine(),
'monthdata': Caffeine.objects.daily_caffeine(),
'yeardata': Caffeine.objects.monthly_caffeine_overall(),
'byhourdata': Caffeine.objects.hourly_caffeine_overall(),
'byweekdaydata': Caffeine.objects.weekdaily_caffeine_overall(),
})
return context_data
class PublicProfileView(TemplateView):
template_name = 'profile.html'
ownprofile = False
profileuser = None
def get_context_data(self, **kwargs):
context = super(PublicProfileView, self).get_context_data(**kwargs)
if 'username' in self.kwargs:
self.profileuser = get_object_or_404(
User, username=self.kwargs['username'])
else:
self.profileuser = self.request.user
total = Caffeine.objects.total_caffeine_for_user(self.profileuser)
todaydata = Caffeine.objects.hourly_caffeine_for_user(
self.profileuser)
monthdata = Caffeine.objects.daily_caffeine_for_user(
self.profileuser)
yeardata = Caffeine.objects.monthly_caffeine_for_user(
self.profileuser)
byhourdata = Caffeine.objects.hourly_caffeine_for_user_overall(
self.profileuser)
byweekdaydata = Caffeine.objects.weekdaily_caffeine_for_user_overall(
self.profileuser)
context.update({
'byhourdata': byhourdata,
'byweekdaydata': byweekdaydata,
'coffees': total[DRINK_TYPES.coffee],
'mate': total[DRINK_TYPES.mate],
'monthdata': monthdata,
'ownprofile': self.ownprofile,
'profileuser': self.profileuser,
'todaydata': todaydata,
'yeardata': yeardata,
})
return context
class ProfileView(PublicProfileView):
ownprofile = True
def get(self, request, *args, **kwargs):
if 'u' in request.GET:
return HttpResponseRedirect(reverse('public', kwargs={
'username': request.GET['u']}))
if not request.user.is_authenticated:
return HttpResponseRedirect(
"%s?next=%s" % (settings.LOGIN_URL, request.path))
return super(ProfileView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ProfileView, self).get_context_data(**kwargs)
entries = Caffeine.objects.latest_caffeine_for_user(
self.profileuser)
context.update({
'entries': entries,
})
return context
class CaffeineActivationView(ActivationView):
def get_success_url(self, user=None):
messages.add_message(
self.request, messages.SUCCESS, ACTIVATION_SUCCESS_MESSAGE)
return reverse_lazy('home')
class CaffeineRegistrationView(RegistrationView):
"""
Customized version of the RegistrationView.
"""
form_class = CoffeestatsRegistrationForm
def get_success_url(self, user=None):
messages.add_message(
self.request, messages.SUCCESS, REGISTRATION_SUCCESS_MESSAGE)
messages.add_message(
self.request, messages.INFO, REGISTRATION_MAILINFO_MESSAGE)
return reverse_lazy('home')
def register(self, form):
new_user = super(CaffeineRegistrationView, self).register(form)
new_user.first_name = form.cleaned_data['firstname']
new_user.last_name = form.cleaned_data['lastname']
new_user.location = form.cleaned_data['location']
new_user.save()
return new_user
class RegistrationClosedView(TemplateView):
template_name = 'django_registration/registration_closed.html'
class SettingsView(LoginRequiredMixin, FormView):
template_name = 'settings.html'
form_class = SettingsForm
success_url = reverse_lazy('settings')
def get_form_kwargs(self):
kwargs = super(SettingsView, self).get_form_kwargs()
kwargs.update({
'instance': self.request.user,
})
return kwargs
def get_context_data(self, **kwargs):
context = super(SettingsView, self).get_context_data(**kwargs)
user = self.request.user
applications = user.caffeine_oauth2_coffeestatsapplication.count()
tokens = user.oauth2_provider_accesstoken.count()
context.update({
'oauth2_applications': applications > 0,
'oauth2_tokens': tokens > 0,
})
return context
def send_email_change_mail(self, form):
ctx_dict = {
'email': form.email_action.data,
'expiration_days': settings.EMAIL_CHANGE_ACTION_VALIDITY,
'action_link': self.request.build_absolute_uri(
reverse('confirm_action',
kwargs={'code': form.email_action.code})),
'user': self.request.user,
}
subject = render_to_string(
'django_registration/email_change_email_subject.txt', ctx_dict)
subject = ''.join(subject.splitlines())
body = render_to_string('django_registration/email_change_email.txt',
ctx_dict)
form.instance.email_user(subject, body, settings.DEFAULT_FROM_EMAIL)
messages.add_message(
self.request, messages.INFO,
SETTINGS_EMAIL_CHANGE_MESSAGE)
def form_valid(self, form):
messages.add_message(
self.request, messages.SUCCESS,
SETTINGS_SUCCESS_MESSAGE)
if form.email_action:
self.send_email_change_mail(form)
form.save()
if form.password_set:
messages.add_message(
self.request, messages.SUCCESS,
SETTINGS_PASSWORD_CHANGE_SUCCESS)
return super(SettingsView, self).form_valid(form)
class ConfirmActionView(SingleObjectMixin, View):
model = Action
slug_field = 'code'
slug_url_kwarg = 'code'
def get(self, request, *args, **kwargs):
action = self.get_object()
data, user = action.data, action.user
if action.atype == ACTION_TYPES.change_email:
user.email = data
messages.add_message(
request, messages.SUCCESS,
EMAIL_CHANGE_SUCCESS_MESSAGE)
user.save()
action.delete()
return HttpResponseRedirect(reverse_lazy('home'))
class OnTheRunView(TemplateView):
template_name = "ontherun.html"
def get_context_data(self, **kwargs):
get_object_or_404(
User, username=self.kwargs['username'],
token=self.kwargs['token'])
return super(OnTheRunView, self).get_context_data(**kwargs)
class OnTheRunOldView(RedirectView):
permanent = True
def get_redirect_url(self, *args, **kwargs):
user = get_object_or_404(
User, username=self.request.GET.get('u'),
token=self.request.GET.get('t'))
return reverse('ontherun', kwargs={
'username': user.username,
'token': user.token})
class BaseSubmitCaffeineView(BaseFormView):
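    """Shared POST-only form handling for caffeine submissions; subclassed by
    the logged-in and on-the-run submission views below."""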
form_class = SubmitCaffeineForm
http_method_names = ['post']
def form_valid(self, form):
caffeine = form.save()
messages.add_message(
self.request, messages.SUCCESS,
SUBMIT_CAFFEINE_SUCCESS_MESSAGE % {
'caffeine': caffeine},
extra_tags='registerdrink')
return super(BaseSubmitCaffeineView, self).form_valid(form)
def form_invalid(self, form):
for field in form.errors:
for error in form.errors[field]:
messages.add_message(
self.request, messages.ERROR, error,
extra_tags='registerdrink')
return HttpResponseRedirect(self.get_success_url())
class SubmitCaffeineView(LoginRequiredMixin, BaseSubmitCaffeineView):
def get_form_kwargs(self):
kwargs = super(SubmitCaffeineView, self).get_form_kwargs()
kwargs.update({
'user': self.request.user,
'ctype': getattr(DRINK_TYPES, self.kwargs['drink']),
})
return kwargs
def get_success_url(self):
return reverse('profile')
class SubmitCaffeineOnTheRunView(BaseSubmitCaffeineView):
def get_form_kwargs(self):
user = get_object_or_404(
User, username=self.kwargs['username'], token=self.kwargs['token'])
kwargs = super(SubmitCaffeineOnTheRunView, self).get_form_kwargs()
kwargs.update({
'user': user,
'ctype': getattr(DRINK_TYPES, self.kwargs['drink']),
})
return kwargs
def get_success_url(self):
return reverse_lazy('ontherun', kwargs={
'username': self.kwargs['username'],
'token': self.kwargs['token']})
class DeleteCaffeineView(LoginRequiredMixin, DeleteView):
"""
View for deleting caffeine instances.
"""
model = Caffeine
success_url = reverse_lazy('profile')
def get_queryset(self):
"""
Make sure that only own caffeine can be deleted.
"""
return super(DeleteCaffeineView, self).get_queryset().filter(
user=self.request.user)
def get_success_url(self):
"""
Return the success URL and add a message about the successful deletion.
"""
messages.add_message(
self.request, messages.SUCCESS, DELETE_CAFFEINE_SUCCESS_MESSAGE)
return super(DeleteCaffeineView, self).get_success_url()
class SelectTimeZoneView(LoginRequiredMixin, UpdateView):
form_class = SelectTimeZoneForm
template_name = 'selecttimezone.html'
def get_context_data(self, **kwargs):
context = super(SelectTimeZoneView, self).get_context_data(**kwargs)
context.update({
'tzlist': common_timezones
})
return context
def form_valid(self, form):
form.save()
messages.add_message(
self.request, messages.SUCCESS,
SELECT_TIMEZONE_SUCCESS_MESSAGE % {
'timezone': form.cleaned_data['timezone']})
return super(SelectTimeZoneView, self).form_valid(form)
def get_success_url(self):
success_url = self.request.GET.get('next', reverse('profile'))
if success_url == reverse('select_timezone'):
success_url = reverse('profile')
return success_url
def get_object(self, queryset=None):
return self.request.user
@require_GET
@login_required
@json_response
def random_users(request):
data = []
for user in User.objects.random_users(int(request.GET.get('count', 5))):
data.append({
'username': user.username,
'name': user.get_full_name(),
'location': user.location,
'profile': request.build_absolute_uri(
reverse('public', kwargs={'username': user.username})),
'coffees': user.coffees,
'mate': user.mate})
return data
|
the-stack_0_22578 | #!/usr/bin/env python3
# coding=utf-8
from collections import defaultdict
from itertools import chain, combinations
import os
import sys
import numpy as np
import torch
import torchvision.transforms.functional as F
ALL_MEANS = {
'vgg16': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16_5class': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16_8class': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16_9class': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16_16class': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16_24class': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16_10class_dp_sgd': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16_10class': [129.186279296875, 104.76238250732422, 93.59396362304688],
'vgg16bn': [131.45376586914062, 103.98748016357422, 91.46234893798828],
'resnet50': [131.0912, 103.8827, 91.4953],
'inception_resnetv1_vggface2': [127.5, 127.5, 127.5],
'inception_resnetv1_vggface2_8631': [127.5, 127.5, 127.5],
'inception_resnetv1_casia': [127.5, 127.5, 127.5],
'sphere20a': [127.5, 127.5, 127.5],
'ccs19ami_facescrub': [0., 0., 0.],
'ccs19ami_facescrub_rgb': [0., 0., 0.],
'azure': [0., 0., 0.],
'cat_resnet18': [0.485*255, 0.456*255, 0.406*255],
'resnet18_10class': [0.485*255, 0.456*255, 0.406*255],
'car_resnet34': [0.5*255, 0.5*255, 0.5*255],
'resnet50_8631': [91.4953, 103.8827, 131.0912], # Note: BGR!!
'resnet50_8631_adv': [91.4953, 103.8827, 131.0912], # Note: BGR!!
'resnet50_8631_vib': [91.4953, 103.8827, 131.0912], # Note: BGR!!
'resnet50_100': [91.4953, 103.8827, 131.0912], # Note: BGR!!
'resnet50_100_adv': [91.4953, 103.8827, 131.0912], # Note: BGR!!
'resnet50_100_vib': [91.4953, 103.8827, 131.0912], # Note: BGR!!
}
ALL_STDS = {
'vgg16': [1., 1., 1.],
'vgg16_5class': [1., 1., 1.],
'vgg16_8class': [1., 1., 1.],
'vgg16_9class': [1., 1., 1.],
'vgg16_16class': [1., 1., 1.],
'vgg16_24class': [1., 1., 1.],
'vgg16_10class_dp_sgd': [1., 1., 1.],
'vgg16_10class': [1., 1., 1.],
'vgg16bn': [1., 1., 1.],
'resnet50': [1., 1., 1.],
'inception_resnetv1_vggface2': [128.0, 128.0, 128.0],
'inception_resnetv1_vggface2_8631': [128.0, 128.0, 128.0],
'inception_resnetv1_casia': [128.0, 128.0, 128.0],
'sphere20a': [128.0, 128.0, 128.0],
'ccs19ami_facescrub': [255., 255., 255.],
'ccs19ami_facescrub_rgb': [255., 255., 255.],
'azure': [255., 255., 255.],
'cat_resnet18': [0.229*255, 0.224*255, 0.225*255],
'resnet18_10class': [0.229*255, 0.224*255, 0.225*255],
'car_resnet34': [0.5*255, 0.5*255, 0.5*255],
'resnet50_8631': [1., 1., 1.],
'resnet50_8631_adv': [1., 1., 1.],
'resnet50_8631_vib': [1., 1., 1.],
'resnet50_100': [1., 1., 1.],
'resnet50_100_adv': [1., 1., 1.],
'resnet50_100_vib': [1., 1., 1.],
}
def denormalize(image_tensor, arch_name):
"""
output image is in [0., 1.] and RGB channel
"""
std = ALL_STDS[arch_name]
mean = ALL_MEANS[arch_name]
image_tensor = image_tensor * torch.tensor(std, device=image_tensor.device)[:, None, None] + torch.tensor(mean, device=image_tensor.device)[:, None, None]
image_tensor = image_tensor / 255.
if 'resnet50_100' in arch_name or 'resnet50_8631' in arch_name:
# change BGR to RGB
if image_tensor.ndim == 4:
assert image_tensor.shape[1] == 3
image_tensor = image_tensor[:, [2, 1, 0]]
else:
assert image_tensor.ndim == 3
assert image_tensor.shape[0] == 3
image_tensor = image_tensor[[2, 1, 0]]
return torch.clamp(image_tensor, 0., 1.)
def normalize(image_tensor, arch_name):
"""
input image is in [0., 255.] and RGB channel
"""
if 'resnet50_100' in arch_name or 'resnet50_8631' in arch_name:
# change RGB to BGR
if image_tensor.ndim == 4:
assert image_tensor.shape[1] == 3
image_tensor = image_tensor[:, [2, 1, 0]]
else:
assert image_tensor.ndim == 3
assert image_tensor.shape[0] == 3
image_tensor = image_tensor[[2, 1, 0]]
std = ALL_STDS[arch_name]
mean = ALL_MEANS[arch_name]
image_tensor = (image_tensor-torch.tensor(mean, device=image_tensor.device)[:, None, None])/torch.tensor(std, device=image_tensor.device)[:, None, None]
return image_tensor
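# Illustrative round trip (assumes a 3xHxW RGB tensor with values in [0., 255.]):
#     x = normalize(img, 'vgg16')       # what the model expects as input
#     img01 = denormalize(x, 'vgg16')   # back to [0., 1.] RGB for saving/plotting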
def crop_img_for_sphereface(img):
assert len(img.shape) == 3 or len(img.shape) == 4
# resize the img to 256 first because the following crop area are defined in 256 scale
img = F.resize(img, (256, 256))
return img[..., 16:226, 38:218]
def crop_img_for_ccs19ami(img):
raise AssertionError('do not use this')
assert len(img.shape) == 3 or len(img.shape) == 4
# resize the img to 256 first because the following crop area are defined in 256 scale
img = F.resize(img, (256, 256))
return img[..., 34:214, 40:220]
def crop_img(img, arch_name):
if arch_name == 'sphere20a':
return crop_img_for_sphereface(img)
elif arch_name.startswith('ccs19ami'):
raise AssertionError('do not use white-box attack for ccs19ami')
return crop_img_for_ccs19ami(img)
else:
return img
def resize_img(img, image_resolution):
if not isinstance(image_resolution, tuple):
image_resolution = (image_resolution, image_resolution)
return F.resize(img, image_resolution)
def clip(image_tensor, use_fp16, arch_name):
assert not use_fp16
std = ALL_STDS[arch_name]
mean = ALL_MEANS[arch_name]
mean = torch.tensor(mean, device=image_tensor.device)[:, None, None]
std = torch.tensor(std, device=image_tensor.device)[:, None, None]
image_tensor = image_tensor.detach().clone()*std + mean
return (torch.clamp(image_tensor, 0., 255.) - mean) / std
def clip_quantile_bound(inputs, all_mins, all_maxs):
clipped = torch.max(torch.min(inputs, all_maxs), all_mins)
return clipped
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(2, len(s)+1))
def compute_topk_labels(logits, k):
topk_conf, topk_ind = torch.topk(logits, k, dim=1, largest=True, sorted=True)
# print(f'topk_ind.shape: {topk_ind.shape}')
# print(topk_ind)
return topk_ind.tolist()
def find_most_overlapped_topk(labels_list, target, k=1):
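    """Pick the subset of candidate top-k lists (each expected to start with
    `target`) whose remaining labels overlap in at least k entries; fall back
    to k=1, and finally to all indices, when no such subset exists."""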
comb_ind_list = list(powerset(range(len(labels_list))))
# print(len(comb_ind_list), comb_ind_list)
labels_list = [set(x[1:]) if x[0]==target else set() for x in labels_list]
common_labels_dict = {}
for comb_ind in comb_ind_list:
t_set = labels_list[comb_ind[0]]
for i in comb_ind[1:]:
t_set = t_set.intersection(labels_list[i])
if len(t_set) > 0:
common_labels_dict[comb_ind] = t_set
# print(common_labels_dict)
for comb_ind in sorted(common_labels_dict.keys(), key=lambda x: len(x), reverse=True):
if len(common_labels_dict[comb_ind]) >= k:
return comb_ind
# print('decrease k to 1')
k = 1
for comb_ind in sorted(common_labels_dict.keys(), key=lambda x: len(x), reverse=True):
if len(common_labels_dict[comb_ind]) >= k:
return comb_ind
# return all indices
return list(range(len(labels_list)))
def my_select_ind(confs, target):
labels_list = compute_topk_labels(confs, 5)
comb_ind = find_most_overlapped_topk(labels_list, target, 2)
confs = confs[comb_ind, target]
ind = torch.argmax(confs).item()
return comb_ind[ind]
def create_folder(folder):
if os.path.exists(folder):
assert os.path.isdir(folder), 'it exists but is not a folder'
else:
os.makedirs(folder)
class Tee(object):
# from https://github.com/MKariya1998/GMI-Attack/blob/master/Celeba/utils.py
def __init__(self, name, mode):
self.file = open(name, mode)
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
self.file.close()
def write(self, data):
if '...' not in data:
self.file.write(data)
self.stdout.write(data)
self.flush()
def flush(self):
self.file.flush()
CONF_MASKS = torch.load('./conf_mask.pt')
def add_conf_to_tensor_(tensor, conf, color, highlight):
""" Note: in-place modification on tensor
"""
assert tensor.ndim == 3 and tensor.shape[0] == 3, 'tensor shape should be 3xHxW'
mask = CONF_MASKS[conf]
tensor[:, -46:-10, 10:120] = (1.-mask) * tensor[:, -46:-10, 10:120] + mask * color
if highlight:
width = 5
tensor[0, :width, :] = 1.
tensor[0, -width:, :] = 1.
tensor[0, :, :width] = 1.
tensor[0, :, -width:] = 1.
def add_conf_to_tensors(tensors, confs, color=torch.tensor([1., 0., 0.]).unsqueeze(1).unsqueeze(1), highlight_conf=None):
""" Note: will clone the tensors to cpu
"""
if len(tensors) != len(confs):
raise AssertionError(f'{len(tensors)} != {len(confs)}, tensors.shape: {tensors.shape}')
tensors = tensors.detach().cpu().clone()
if highlight_conf is not None:
highlight_confs = [x>=highlight_conf for x in confs]
else:
highlight_confs = [False] * len(confs)
confs = [f'{x:.4f}' for x in confs]
for i in range(len(tensors)):
add_conf_to_tensor_(tensors[i], confs[i], color, highlight_confs[i])
return tensors
def crop_and_resize(img, arch_name, resolution):
img = crop_img(img, arch_name)
img = resize_img(img, resolution)
return img
if __name__ == '__main__':
labels_list = [[1, 2377, 17, 1570, 2051],
[1, 2377, 17, 1570, 2051],
[1, 2377, 17, 2051, 1570],
[1, 2377, 17, 2051, 1570],
[1, 2377, 17, 1570, 2241],
[1, 848, 1915, 1806, 853],
[1, 1915, 853, 61, 1855],
[1, 35, 1915, 2217, 61]]
labels_list = [[4, 1856, 2474, 674, 2171],
[4, 2235, 935, 2173, 844],
[4, 2611, 2173, 844, 935],
[4, 152, 27, 844, 2611],
[4, 1856, 199, 674, 2171],
[4, 2474, 2171, 1856, 139],
[4, 1027, 837, 10, 1440],
[4, 837, 1319, 1027, 1440]]
target = 4
k = 2
comb_ind = find_most_overlapped_topk(labels_list, target, k)
print('returned', comb_ind)
|
the-stack_0_22579 | """
TriviaBot Version 2
Made by Sai Sameer Pusapaty
"""
import io
import os
import crayons
from threading import Thread
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
from googleapiclient.discovery import build
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '[CREDENTIALS PATH]'
common = ['the', 'of', 'and', 'to', 'a', 'in', 'for', 'is', 'on', 'that', 'by', 'this', 'with', 'i', 'you', 'it', 'not', 'or', 'be', 'are', 'from', 'at', 'as']
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
#READING TEXT FROM IMAGE
def detect_text(path):
"""Detects text in the file."""
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
overall = []
for text in texts:
overall.append('"{}"'.format(text.description))
prob = overall[0]
question = ''
index = 1
for c in prob[1:len(prob)-1]:
if c != '?':
question += c
else:
prob = prob[index+1:len(prob)-1]
question += '?'
break
index += 1
answers = [s.strip() for s in prob.splitlines()]
answers = answers[len(answers)-3:]
question = question.lower()
return question,answers
q,a = detect_text('[IMAGE_PATH]')
ifNOT = False
if "not" in q.split():
ifNOT = True
q = q.split()
q.remove("not")
q = ' '.join(q)
if "never" in q.split():
ifNOT = True
q = q.split()
q.remove("never")
q = ' '.join(q)
searches = [q]
for answer in a:
searches.append(q+" "+answer)
#SEARCHING QUESTION ON GOOGLE SEARCH
my_api_key = "##############################"
my_cse_id = "#########################"
def google_search(search_term, api_key, cse_id, **kwargs):
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
return res['items']
output = {}
def crawl(q,a,search,output,index):
results = google_search(
search, my_api_key, my_cse_id, num=10)
ansDict = {}
for ans in a:
ansDict[ans] = 0
i = 1
for result in results:
ansDic = []
final = result['snippet'] + " " + result['title'] + " "
for ans in a:
ansCount = 0
for e in ans.split():
if (e.lower() in common) or (e.lower() in letters):
continue
ansCount += final.count(e)
ansCount += final.count(ans)
ansDict[ans] += ansCount
ansDic.append((ans, ansDict[ans] / i))
        # keep the rankings computed on the final processed result
        if i == len(results):
            test1 = ansDic[::-1]
i += 1
output[index] = test1
##################################
threads = []
for ii in range(len(searches)):
# We start one thread per url present.
process = Thread(target=crawl, args=[q,a,searches[ii], output, ii])
process.start()
threads.append(process)
# We now pause execution on the main thread by 'joining' all of our started threads.
# This ensures that each has finished processing the urls.
for process in threads:
process.join()
#########################################
sums = {}
for i in range(4):
for e in output[i]:
if e[0] not in sums:
sums[e[0]] = e[1]
else:
sums[e[0]] += e[1]
total_sum = 0
for k in sums:
total_sum += sums[k]
test = []
for k in sums:
sums[k] = sums[k]*100/total_sum
test.append((k,sums[k]))
test.sort(key=lambda x: x[1])
test = test[::-1]
if ifNOT:
test = test[::-1]
for i in range(len(test)):
print(crayons.green(test[i][0]), end="")
print(crayons.green(" -- " + str("%.2f" % test[i][1] + "%")))
|
the-stack_0_22580 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <[email protected]>
# (c) 1998-2021 all rights reserved
def test():
"""
Exercise the simplest non-trivial use case
"""
# get the journal
import journal
# make a channel
channel = journal.firewall(name="test.journal.firewall")
# send the output to the trash
channel.device = journal.trash()
# make the firewall non-fatal
channel.fatal = False
# inject
channel.log("hello world!")
# all done
return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
|
the-stack_0_22581 | """
Given n non-negative integers representing an elevation map where the width of each bar is 1,
compute how much water it is able to trap after raining.
Example:
Input: [0,1,0,2,1,0,1,3,2,1,2,1]
Output: 6
"""
from typing import List
def max_water(elevation: List[int]) -> int:
size = len(elevation)
left_largest = [0] * size
right_largest = [0] * size
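    # left_largest[i] / right_largest[i]: tallest bar strictly to the left / right of index i;
    # water held at i is bounded by the lower of those two walls minus the bar height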
for index in range(1, size):
left_largest[index] = max(left_largest[index - 1], elevation[index - 1])
for index in range(size - 2, -1, -1):
right_largest[index] = max(right_largest[index + 1], elevation[index + 1])
water = 0
for index in range(1, size - 1): # water at index
water += max(
0, min(left_largest[index], right_largest[index]) - elevation[index]
)
return water
if __name__ == "__main__":
assert max_water([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]) == 6
|
the-stack_0_22582 | from rest_framework import serializers
from cotidia.admin.serializers import BaseDynamicListSerializer
from cotidia.team.serializers import MemberAdminSerializer
from consult.models import Booking
from consult.serializers.servicetype import ServiceTypeAdminSerializer
from consult.serializers.customer import CustomerAdminSerializer
class BookingAdminSerializer(BaseDynamicListSerializer):
service_type = ServiceTypeAdminSerializer()
member = MemberAdminSerializer()
customer = CustomerAdminSerializer()
status = serializers.CharField()
class Meta:
model = Booking
exclude = ["id"]
class SearchProvider:
display_field = "name"
filters = "__all__"
default_columns = [
"datetime",
"time",
"member",
"customer",
"service_type",
"status",
]
general_query_fields = [
"member__first_name",
"member__last_name",
"customer__first_name",
"customer__last_name" "service_type__name",
]
toolbar_filters = ["datetime"]
class BookingCalendarSerializer(serializers.ModelSerializer):
title = serializers.CharField(source="calendar_title")
url = serializers.CharField(source="calendar_url")
date = serializers.DateField()
class Meta:
model = Booking
fields = ("title", "date", "service_type", "url")
def to_representation(self, instance):
ret = super().to_representation(instance)
# ret["job_uuid"] = instance.callsheet.uuid
# ret["day_reference"] = instance.day_reference
# ret["day_reference_complete"] = instance.get_day_reference()
# ret["status_verbal"] = instance.status_verbal()
# ret["client"] = instance.callsheet.client.__str__()
return ret
|
the-stack_0_22584 | import copy
import os
import random
import sys
import numpy as np
import scipy.stats as st
import Libs.confs
import Libs.genos_file_formats
import Libs.logger
import Libs.pretty_printing
import Libs.priors
def ParseCmd(argv):
if len(argv) < 5:
print('Usage ' + os.path.basename(argv[0]) + ' K N L D [--disease]')
print('')
print(' K : Number of clusters')
print(' N : Number of individuals in each cluster')
print(' L : Number of loci')
print(' D : Percent of loci with different MAF')
print(' --disease : Make disease labels (default is false)')
exit(1)
num_clusters = int(argv[1])
num_indivs = int(argv[2])
num_loci = int(argv[3])
diff_maf = int(argv[4])
is_disease = False
if '--disease' in argv:
is_disease = True
Libs.logger.Log(' K : ' + str(num_clusters))
Libs.logger.Log(' N : ' + str(num_indivs))
Libs.logger.Log(' L : ' + str(num_loci))
Libs.logger.Log(' D : ' + str(diff_maf))
Libs.logger.Log(' Is Disease : ' + str(is_disease))
return num_clusters, num_indivs, num_loci, diff_maf, is_disease
def SelectLociDiffMAFs(num_loci, diff_mafs_percent):
loci_list = []
num_diff_loci = int(num_loci / 100.0 * diff_mafs_percent)
Libs.logger.Log('Num Diff Loci : %d' % num_diff_loci)
num_diff_loci = max(1, int(num_diff_loci + 0.5))
while len(loci_list) < num_diff_loci:
l = int(random.uniform(0, num_loci))
if l not in loci_list:
loci_list.append(l)
loci_list = sorted(loci_list)
return loci_list
def IsDuplicated(loci, loci_list):
for k in range(len(loci_list)):
if loci in loci_list[k]:
return True
return False
def SelectDiseaseLociAndSNPs(num_loci, num_clusters, diff_loci):
loci_list = [ [] for k in range(num_clusters) ]
for k in range(num_clusters):
while len(loci_list[k]) < 3:
l = int(random.uniform(0, num_loci))
if not IsDuplicated(l, loci_list):
loci_list[k].append(l)
loci_list[k] = sorted(loci_list[k])
snp_list = [ [ int(random.uniform(0, Libs.priors.NUM_ALLELES)) for i in range(3) ] for k in range(num_clusters) ]
return loci_list, snp_list
def MakeRandomBaseFreq(num_loci):
return [ round(random.uniform(0, 1), 2) for l in range(num_loci) ]
def GetRoundGenoProb():
GENOS_WHEEL = [ 1.0 / 3, 2.0 / 3, 1.0 ]
PROBS = [ 0, 0.5, 1 ]
u = random.uniform(0, 1)
for g in range(len(GENOS_WHEEL)):
if u < GENOS_WHEEL[g]:
return PROBS[g]
    return PROBS[-1]
def MakeRoundBaseFreq(num_loci):
return [ GetRoundGenoProb() for l in range(num_loci) ]
def MakeRandomFreqs(num_clusters, num_loci, diff_loci):
base_freq = MakeRandomBaseFreq(num_loci)
freqs = [ copy.deepcopy(base_freq) for k in range(num_clusters) ]
for k in range(1, num_clusters):
for l in diff_loci:
freqs[k][l] = round(random.uniform(0, 1), 2)
return freqs
def MakeRoundFreqs(num_clusters, num_loci, diff_loci):
base_freq = MakeRoundBaseFreq(num_loci)
freqs = [ copy.deepcopy(base_freq) for k in range(num_clusters) ]
for k in range(1, num_clusters):
for l in diff_loci:
freqs[k][l] = GetRoundGenoProb()
return freqs
def MakeFreqs(num_clusters, num_loci, diff_loci, is_random=False):
if is_random:
return MakeRandomFreqs(num_clusters, num_loci, diff_loci)
return MakeRoundFreqs(num_clusters, num_loci, diff_loci)
def GetGenos(freq):
u1 = random.uniform(0, 1)
a1 = 1 if u1 < freq else 0
u2 = random.uniform(0, 1)
a2 = 1 if u2 < freq else 0
return a1 + a2
def DrawGenos(freqs):
num_loci = len(freqs)
genos = [ GetGenos(freqs[l]) for l in range(num_loci) ]
return genos
def CalcDiseaseStatus(genotype, cluster, dise_loci, snp_list, is_bernoulli=False):
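    # A sample is labelled a case when it carries the cluster's three risk SNPs:
    # the indicator product drives a logistic, so p = sigmoid(1.5) ~ 0.82 for carriers
    # and 0.5 otherwise (thresholded at 0.5 unless Bernoulli sampling is requested).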
snps = [ genotype[l] for l in dise_loci[cluster] ]
indic = [ 1 if snps[l] == snp_list[cluster][l] else 0 for l in range(len(snps)) ]
z = 1.5 * np.prod(indic)
prob = 1.0 / (1.0 + np.exp(-z))
y = 0 if prob <= 0.5 else 1
if is_bernoulli:
y = st.bernoulli.rvs(prob)
return y == 1
def WriteToFile(genos, labels, diff_loci, freqs, dise_loci, snp_list, params_tuple):
num_clusters = params_tuple[0]
if not os.path.isdir('Dataset'):
os.makedirs('Dataset')
# Write compressed file.
str_path = os.path.join('Dataset', 'genos_K%d_N%d_L%d_D%d.str' % params_tuple)
Libs.genos_file_formats.WriteSTRUCTURE(str_path, genos)
# Write STRUCTURE formatted genotypes.
my_str_path = os.path.join('Dataset', 'genos_K%d_N%d_L%d_D%d.txt' % params_tuple )
    Libs.genos_file_formats.WriteMySTRUCTURE(my_str_path, genos, labels, num_clusters)
# Write allele frequencies.
freq_path = os.path.join('Dataset', 'freqs_K%d_N%d_L%d_D%d.txt' % params_tuple)
with open(freq_path, 'w') as f:
f.write('\n'.join([ str(f) for f in freqs ]))
# Write loci with difference in MAF.
diff_path = os.path.join('Dataset', 'diffs_K%d_N%d_L%d_D%d.txt' % params_tuple)
with open(diff_path, 'w') as f:
f.write(str(diff_loci))
    if len(labels) != 0:
# Write BEAM formatted genotypes.
beam_path = os.path.join('Dataset', 'genos_beam_K%d_N%d_L%d_D%d.txt' % params_tuple)
        Libs.genos_file_formats.WriteBEAM(beam_path, genos, labels)
if len(dise_loci) != 0 and len(snp_list) != 0:
# Write disease affected loci and corresponding SNPs.
dise_path = os.path.join('Dataset', 'dise_K%d_N%d_L%d_D%d.txt' % params_tuple)
with open(dise_path, 'w') as f:
for k in range(num_clusters):
f.write(' '.join([ str(d) for d in dise_loci[k] ]) + '\n')
f.write('\n')
for k in range(num_clusters):
f.write(' '.join([ str(s) for s in snp_list[k] ]) + '\n')
if __name__ == '__main__':
Libs.logger.Log('\n\nStart of mk_dataset.py')
NUM_CLUSTERS, NUM_INDIVS, NUM_LOCI, DIFF_MAF, IS_DISEASE = ParseCmd(sys.argv)
diff_loci = SelectLociDiffMAFs(NUM_LOCI, DIFF_MAF)
Libs.logger.Log('\ndiff loci:')
Libs.pretty_printing.PrintList(diff_loci)
freqs = MakeFreqs(NUM_CLUSTERS, NUM_LOCI, diff_loci, True)
Libs.logger.Log('\nfreqs:')
Libs.pretty_printing.PrintListOfLists(freqs)
dise_loci = []
snp_list = []
genos = []
lbls = []
if IS_DISEASE:
dise_loci, snp_list = SelectDiseaseLociAndSNPs(NUM_LOCI, NUM_CLUSTERS, diff_loci)
Libs.logger.Log('\ndisease loci:')
Libs.pretty_printing.PrintListOfLists(dise_loci)
Libs.logger.Log('\ndisease SNPs:')
Libs.pretty_printing.PrintListOfLists(snp_list)
for k in range(NUM_CLUSTERS):
if IS_DISEASE:
cases = []
ctrls = []
while len(cases) + len(ctrls) < NUM_INDIVS:
g = DrawGenos(freqs[k])
y = CalcDiseaseStatus(g, k, dise_loci, snp_list)
if y == True:
if len(cases) < NUM_INDIVS // 2:
cases.append(g)
else:
if len(ctrls) < NUM_INDIVS // 2 + (0 if NUM_INDIVS % 2 == 0 else 1):
ctrls.append(g)
genos += ctrls + cases
lbls += [ 0 for i in range(len(ctrls)) ] + [ 1 for i in range(len(cases)) ]
else:
genos += [ DrawGenos(freqs[k]) for n in range(NUM_INDIVS) ]
Libs.logger.Log('\ngenos:')
Libs.pretty_printing.PrintListOfLists(genos)
if IS_DISEASE:
Libs.logger.Log('\nlabels:')
Libs.pretty_printing.PrintList(lbls)
params_tuple = (NUM_CLUSTERS, NUM_INDIVS, NUM_LOCI, DIFF_MAF)
WriteToFile(genos, lbls, diff_loci, freqs, dise_loci, snp_list, params_tuple)
|
the-stack_0_22585 | ##
# @file This file is part of the ExaHyPE project.
# @author ExaHyPE Group ([email protected])
#
# @section LICENSE
#
# Copyright (c) 2016 http://exahype.eu
# All rights reserved.
#
# The project has received funding from the European Union's Horizon
# 2020 research and innovation programme under grant agreement
# No 671698. For copyrights and licensing, please consult the webpage.
#
# Released under the BSD 3 Open Source License.
# For the full license text, see LICENSE.txt
#
#
# @section DESCRIPTION
#
# Generate the converter, used to mix generic and optimized kernels
#
from .abstractModelBaseClass import AbstractModelBaseClass
class ConverterModel(AbstractModelBaseClass):
def generateCode(self):
self.context["noVarPadding"] = self.context["nVarPad"] == self.context["nVar"]
self.render("converter_h.template", "converter.h")
self.render("converter_cpp.template", "converter.cpp")
|
the-stack_0_22586 | import os
import time
import requests
from selenium import webdriver
def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver, sleep_between_interactions: int = 1):
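    """Scroll through Google Images results for `query`, click each thumbnail,
    and collect up to max_links_to_fetch full-resolution image URLs (a set)."""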
def scroll_to_end(wd):
wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(sleep_between_interactions)
# build the google query
search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"
# load the page
wd.get(search_url.format(q=query))
image_urls = set()
image_count = 0
results_start = 0
while image_count < max_links_to_fetch:
scroll_to_end(wd)
# get all image thumbnail results
thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
number_results = len(thumbnail_results)
print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")
for img in thumbnail_results[results_start:number_results]:
# try to click every thumbnail such that we can get the real image behind it
try:
img.click()
time.sleep(sleep_between_interactions)
except Exception:
continue
# extract image urls
actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
for actual_image in actual_images:
if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
image_urls.add(actual_image.get_attribute('src'))
image_count = len(image_urls)
if len(image_urls) >= max_links_to_fetch:
print(f"Found: {len(image_urls)} image links, done!")
break
else:
print("Found:", len(image_urls), "image links, looking for more ...")
time.sleep(30)
                return image_urls  # hand back what was collected instead of None
load_more_button = wd.find_element_by_css_selector(".mye4qd")
if load_more_button:
wd.execute_script("document.querySelector('.mye4qd').click();")
# move the result startpoint further down
results_start = len(thumbnail_results)
return image_urls
def persist_image(folder_path:str,url:str, counter):
try:
image_content = requests.get(url).content
except Exception as e:
print(f"ERROR - Could not download {url} - {e}")
try:
f = open(os.path.join(folder_path, 'jpg' + "_" + str(counter) + ".jpg"), 'wb')
f.write(image_content)
f.close()
print(f"SUCCESS - saved {url} - as {folder_path}")
except Exception as e:
print(f"ERROR - Could not save {url} - {e}")
def search_and_download(search_term: str, driver_path: str, target_path='./images', number_images=10):
target_folder = os.path.join(target_path, '_'.join(search_term.lower().split(' ')))
if not os.path.exists(target_folder):
os.makedirs(target_folder)
with webdriver.Chrome(executable_path=driver_path) as wd:
res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)
counter = 0
for elem in res:
persist_image(target_folder, elem, counter)
counter += 1
# How to execute this code
# Step 1 : pip install selenium. pillow, requests
# Step 2 : make sure you have chrome installed on your machine
# Step 3 : Check your chrome version ( go to three dot then help then about google chrome )
# Step 4 : Download the same chrome driver from here " https://chromedriver.storage.googleapis.com/index.html "
# Step 5 : put it inside the same folder of this code
DRIVER_PATH = './chromedriver'
search_term = 'Cat'
# num of images you can pass it from here by default it's 10 if you are not passing
number_images = 11
search_and_download(search_term=search_term, driver_path=DRIVER_PATH,number_images=number_images) |
the-stack_0_22587 | import param
class AxisOptionsPanel(param.Parameterized):
"""
Class (try) to define all axis options available for Panel
"""
width = param.Integer(default=800, bounds=(200, 1600))
height = param.Integer(default=600, bounds=(200, 1600))
shared_axes = param.Boolean(True, doc="Share axes parameter")
grid = param.Boolean(default=False, doc="Whether to show a grid")
legend = param.ObjectSelector(default="top", objects=("top_right", "top_left", "bottom_left", "bottom_right", "right", "left", "top", "bottom", None),
doc="Whether to show a legend, or a legend position")
rot = param.Integer(default=45, bounds=(0, 180),
doc="Rotates the axis ticks along the x-axis by the specified number of degrees.")
# xlim = param.Number(8.2,bounds=(7.5,10))
# ylim = param.Number(8.2,bounds=(7.5,10))
xticks = param.Integer(default=6, bounds=(1, 10))
yticks = param.Integer(default=6, bounds=(1, 10))
colorbar = param.Boolean(default=False, doc="Enables colorbar")
invert = param.Boolean(default=False, doc="Swaps, x- y- Axis")
title = param.String(default="")
logx = param.Boolean(default=False, doc="Enables logarithmic x-axis")
logy = param.Boolean(default=False, doc=" Enables logarithmic y-axis")
loglog = param.Boolean(default=False, doc="Enables logarithmic x- and y-axis")
xaxis = param.ObjectSelector(default="bottom", objects=["top", "bottom", None])
yaxis = param.ObjectSelector(default="left", objects=["left", "right", None])
# xformatter = param.String(default="%3.f")
# yformatter = param.String(default="%3.f")
xlabel = param.String(default="", doc="Axis labels for the x-axis")
ylabel = param.String(default="", doc="Axis labels for the y-axis")
#######################
# Optional parameters #
#######################
# options = param.Parameter(precedence=3)
def __init__(self, dataframe=None, objects=None, defaults=None, **params):
"""
:param dataframe: Pandas dataframe formatted (e.g. all types are well defined,...)
:param objects: Dictionary to populate param widget
:param defaults: Dictionary to set the default value of widget
:param params: all other params
"""
try:
self.dataframe = dataframe
except Exception as e:
print(e)
self.set_options()
super(AxisOptionsPanel, self).__init__(**params)
def set_options(self):
self.plot_options = {
'width': self.width,
'height': self.height,
'shared_axes': self.shared_axes,
'grid': self.grid,
'legend': self.legend,
'rot': self.rot,
# 'xlim' : self.xlim,
# 'ylim' : self.ylim,
'xticks': self.xticks,
'yticks': self.yticks,
'colorbar': self.colorbar,
'invert': self.invert,
'title': self.title,
'logx': self.logx,
'logy': self.logy,
'loglog': self.loglog,
'xaxis': self.xaxis,
'yaxis': self.yaxis,
# 'xformatter' : self.xformatter,
# 'yformatter' : self.yformatter,
'xlabel': self.xlabel,
'ylabel': self.ylabel,
# 'padding' : self.padding,
}
def view(self):
self.set_options()
return self.plot_options
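# Illustrative usage sketch (not part of the original module): unspecified
# parameters fall back to their declared defaults, and view() returns the
# options dict a caller could pass on to a plotting backend such as hvplot.
if __name__ == "__main__":
    panel = AxisOptionsPanel(title="Example plot", xlabel="x", ylabel="y")
    print(panel.view())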
|
the-stack_0_22590 | #
# Secret Labs' Regular Expression Engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB ([email protected]).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
# 2010-01-16 mrab Python front-end re-written and extended
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to those
found in Perl. It supports both 8-bit and Unicode strings; both the pattern and
the strings being processed can contain null bytes and characters outside the
US ASCII range.
Regular expressions can contain both special and ordinary characters. Most
ordinary characters, like "A", "a", or "0", are the simplest regular
expressions; they simply match themselves. You can concatenate ordinary
characters, so last matches the string 'last'.
There are a few differences between the old (legacy) behaviour and the new
(enhanced) behaviour, which are indicated by VERSION0 or VERSION1.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the
newline at the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding
RE. Greedy means that it will match as many repetitions
as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding
RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special
characters.
*+,++,?+ Possessive versions of the previous three special
characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
{m,n}+ Possessive version of the above.
{...} Fuzzy matching constraints.
"\\" Either escapes special characters or signals a special
sequence.
[...] Indicates a set of characters. A "^" as the first
character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses. The contents are
captured and can be retrieved or matched later in the
string.
(?flags-flags) VERSION1: Sets/clears the flags for the remainder of
the group or pattern; VERSION0: Sets the flags for the
entire pattern.
(?:...) Non-capturing version of regular parentheses.
(?>...) Atomic non-capturing version of regular parentheses.
(?flags-flags:...) Non-capturing version of regular parentheses with local
flags.
(?P<name>...) The substring matched by the group is accessible by
name.
(?<name>...) The substring matched by the group is accessible by
name.
(?P=name) Matches the text matched earlier by the group named
name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the
string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ....
(?<!...) Matches if not preceded by ....
(?(id)yes|no) Matches yes pattern if group id matched, the (optional)
no pattern otherwise.
(?(DEFINE)...) If there's no group called "DEFINE", then ... will be
ignored, but any group definitions will be available.
(?|...|...) (?|A|B), creates an RE that will match either A or B,
but reuses capture group numbers across the
alternatives.
(*FAIL) Forces matching to fail, which means immediate
backtracking.
(*F) Abbreviation for (*FAIL).
(*PRUNE) Discards the current backtracking information. Its
effect doesn't extend outside an atomic group or a
lookaround.
(*SKIP) Similar to (*PRUNE), except that it also sets where in
the text the next attempt at matching the entire
pattern will start. Its effect doesn't extend outside
an atomic group or a lookaround.
The fuzzy matching constraints are: "i" to permit insertions, "d" to permit
deletions, "s" to permit substitutions, "e" to permit any of these. Limits are
optional with "<=" and "<". If any type of error is provided then any type not
provided is not permitted.
A cost equation may be provided.
Examples:
(?:fuzzy){i<=2}
(?:fuzzy){i<=1,s<=2,d<=1,1i+1s+1d<3}
VERSION1: Set operators are supported, and a set can include nested sets. The
set operators, in order of increasing precedence, are:
|| Set union ("x||y" means "x or y").
~~ (double tilde) Symmetric set difference ("x~~y" means "x or y, but not
both").
&& Set intersection ("x&&y" means "x and y").
-- (double dash) Set difference ("x--y" means "x but not y").
Implicit union, ie, simple juxtaposition like in [ab], has the highest
precedence.
VERSION0 and VERSION1:
The special sequences consist of "\\" and a character from the list below. If
the ordinary character is not on the list, then the resulting RE will match the
second character.
\number Matches the contents of the group of the same number if
number is no more than 2 digits, otherwise the character
with the 3-digit octal code.
\a Matches the bell character.
\A Matches only at the start of the string.
\b Matches the empty string, but only at the start or end of a
word.
\B Matches the empty string, but not at the start or end of a
word.
\d Matches any decimal digit; equivalent to the set [0-9] when
matching a bytestring or a Unicode string with the ASCII
flag, or the whole range of Unicode digits when matching a
Unicode string.
\D Matches any non-digit character; equivalent to [^\d].
\f Matches the formfeed character.
\g<name> Matches the text matched by the group named name.
\G Matches the empty string, but only at the position where
the search started.
\K Keeps only what follows for the entire match.
\L<name> Named list. The list is provided as a keyword argument.
\m Matches the empty string, but only at the start of a word.
\M Matches the empty string, but only at the end of a word.
\n Matches the newline character.
\N{name} Matches the named character.
\p{name=value} Matches the character if its property has the specified
value.
\P{name=value} Matches the character if its property hasn't the specified
value.
\r Matches the carriage-return character.
\s Matches any whitespace character; equivalent to
[ \t\n\r\f\v].
\S Matches any non-whitespace character; equivalent to [^\s].
\t Matches the tab character.
\uXXXX Matches the Unicode codepoint with 4-digit hex code XXXX.
\UXXXXXXXX Matches the Unicode codepoint with 8-digit hex code
XXXXXXXX.
\v Matches the vertical tab character.
\w Matches any alphanumeric character; equivalent to
[a-zA-Z0-9_] when matching a bytestring or a Unicode string
with the ASCII flag, or the whole range of Unicode
alphanumeric characters (letters plus digits plus
underscore) when matching a Unicode string. With LOCALE, it
will match the set [0-9_] plus characters defined as
letters for the current locale.
\W Matches the complement of \w; equivalent to [^\w].
\xXX Matches the character with 2-digit hex code XX.
\X Matches a grapheme.
\Z Matches only at the end of the string.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern at the beginning of a string.
fullmatch Match a regular expression pattern against all of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string using a
template string.
subf Substitute occurrences of a pattern found in a string using a
format string.
subn Same as sub, but also return the number of substitutions made.
subfn Same as subf, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern. VERSION1: will
split at zero-width match; VERSION0: won't split at zero-width
match.
splititer Return an iterator yielding the parts of a split string.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a Pattern object.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics or special characters in a
string.
Most of the functions support a concurrent parameter: if True, the GIL will be
released during matching, allowing other Python threads to run concurrently. If
the string changes during matching, the behaviour is undefined. This parameter
is not needed when working on the builtin (immutable) string classes.
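A minimal usage sketch (illustrative only; it assumes the module has been imported
as "regex"):
    regex.sub(r"\d+", "#", "room 101")    # -> 'room #'
    regex.findall(r"\w+", "one, two")     # -> ['one', 'two']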
Some of the functions in this module take flags as optional parameters. Most of
these flags can also be set within an RE:
A a ASCII Make \w, \W, \b, \B, \d, and \D match the
corresponding ASCII character categories. Default
when matching a bytestring.
B b BESTMATCH Find the best fuzzy match (default is first).
D DEBUG Print the parsed pattern.
E e ENHANCEMATCH Attempt to improve the fit after finding the first
fuzzy match.
F f FULLCASE Use full case-folding when performing
case-insensitive matching in Unicode.
I i IGNORECASE Perform case-insensitive matching.
L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the
current locale. (One byte per character only.)
M m MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string. "$" matches the end of lines
(before a newline) as well as the end of the string.
P p POSIX Perform POSIX-standard matching (leftmost longest).
R r REVERSE Searches backwards.
S s DOTALL "." matches any character at all, including the
newline.
U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the
Unicode locale. Default when matching a Unicode
string.
V0 V0 VERSION0 Turn on the old legacy behaviour.
V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag
includes the FULLCASE flag.
W w WORD Make \b and \B work with default Unicode word breaks
and make ".", "^" and "$" work with Unicode line
breaks.
X x VERBOSE Ignore whitespace and comments for nicer looking REs.
This module also defines an exception 'error'.
"""
# Public symbols.
__all__ = ["compile", "escape", "findall", "finditer", "fullmatch", "match",
"purge", "search", "split", "splititer", "sub", "subf", "subfn", "subn",
"template", "Scanner", "A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E",
"ENHANCEMATCH", "S", "DOTALL", "F", "FULLCASE", "I", "IGNORECASE", "L",
"LOCALE", "M", "MULTILINE", "P", "POSIX", "R", "REVERSE", "T", "TEMPLATE",
"U", "UNICODE", "V0", "VERSION0", "V1", "VERSION1", "X", "VERBOSE", "W",
"WORD", "error", "Regex"]
__version__ = "2.4.120"
# --------------------------------------------------------------------
# Public interface.
def match(pattern, string, flags=0, pos=None, endpos=None, partial=False,
concurrent=None, **kwargs):
"""Try to apply the pattern at the start of the string, returning a match
object, or None if no match was found."""
return _compile(pattern, flags, kwargs).match(string, pos, endpos,
concurrent, partial)
def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False,
concurrent=None, **kwargs):
"""Try to apply the pattern against all of the string, returning a match
object, or None if no match was found."""
return _compile(pattern, flags, kwargs).fullmatch(string, pos, endpos,
concurrent, partial)
def search(pattern, string, flags=0, pos=None, endpos=None, partial=False,
concurrent=None, **kwargs):
"""Search through string looking for a match to the pattern, returning a
match object, or None if no match was found."""
return _compile(pattern, flags, kwargs).search(string, pos, endpos,
concurrent, partial)
def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
concurrent=None, **kwargs):
"""Return the string obtained by replacing the leftmost (or rightmost with a
reverse pattern) non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable; if a string,
backslash escapes in it are processed; if a callable, it's passed the match
object and must return a replacement string to be used."""
return _compile(pattern, flags, kwargs).sub(repl, string, count, pos,
endpos, concurrent)
def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
concurrent=None, **kwargs):
"""Return the string obtained by replacing the leftmost (or rightmost with a
reverse pattern) non-overlapping occurrences of the pattern in string by the
replacement format. format can be either a string or a callable; if a string,
it's treated as a format string; if a callable, it's passed the match object
and must return a replacement string to be used."""
return _compile(pattern, flags, kwargs).subf(format, string, count, pos,
endpos, concurrent)
def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
concurrent=None, **kwargs):
"""Return a 2-tuple containing (new_string, number). new_string is the string
obtained by replacing the leftmost (or rightmost with a reverse pattern)
non-overlapping occurrences of the pattern in the source string by the
replacement repl. number is the number of substitutions that were made. repl
can be either a string or a callable; if a string, backslash escapes in it
are processed; if a callable, it's passed the match object and must return a
replacement string to be used."""
return _compile(pattern, flags, kwargs).subn(repl, string, count, pos,
endpos, concurrent)
def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
concurrent=None, **kwargs):
"""Return a 2-tuple containing (new_string, number). new_string is the string
obtained by replacing the leftmost (or rightmost with a reverse pattern)
non-overlapping occurrences of the pattern in the source string by the
replacement format. number is the number of substitutions that were made. format
can be either a string or a callable; if a string, it's treated as a format
string; if a callable, it's passed the match object and must return a
replacement string to be used."""
return _compile(pattern, flags, kwargs).subfn(format, string, count, pos,
endpos, concurrent)
def split(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
"""Split the source string by the occurrences of the pattern, returning a
list containing the resulting substrings. If capturing parentheses are used
in pattern, then the text of all groups in the pattern are also returned as
part of the resulting list. If maxsplit is nonzero, at most maxsplit splits
occur, and the remainder of the string is returned as the final element of
the list."""
return _compile(pattern, flags, kwargs).split(string, maxsplit, concurrent)
def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
"Return an iterator yielding the parts of a split string."
return _compile(pattern, flags, kwargs).splititer(string, maxsplit,
concurrent)
def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
concurrent=None, **kwargs):
"""Return a list of all matches in the string. The matches may be overlapped
if overlapped is True. If one or more groups are present in the pattern,
return a list of groups; this will be a list of tuples if the pattern has
more than one group. Empty matches are included in the result."""
return _compile(pattern, flags, kwargs).findall(string, pos, endpos,
overlapped, concurrent)
def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
partial=False, concurrent=None, **kwargs):
"""Return an iterator over all matches in the string. The matches may be
overlapped if overlapped is True. For each match, the iterator returns a
match object. Empty matches are included in the result."""
return _compile(pattern, flags, kwargs).finditer(string, pos, endpos,
overlapped, concurrent, partial)
def compile(pattern, flags=0, **kwargs):
"Compile a regular expression pattern, returning a pattern object."
return _compile(pattern, flags, kwargs)
def purge():
"Clear the regular expression cache"
_cache.clear()
_locale_sensitive.clear()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object."
return _compile(pattern, flags | TEMPLATE)
def escape(pattern, special_only=False):
"Escape all non-alphanumeric characters or special characters in pattern."
# Convert it to Unicode.
if isinstance(pattern, bytes):
p = pattern.decode("latin-1")
else:
p = pattern
s = []
if special_only:
for c in p:
if c in _METACHARS or c.isspace():
s.append("\\")
s.append(c)
elif c == "\x00":
s.append("\\000")
else:
s.append(c)
else:
for c in p:
if c in _ALNUM:
s.append(c)
elif c == "\x00":
s.append("\\000")
else:
s.append("\\")
s.append(c)
r = "".join(s)
# Convert it back to bytes if necessary.
if isinstance(pattern, bytes):
r = r.encode("latin-1")
return r
# --------------------------------------------------------------------
# Internals.
import _regex_core
import _regex
from threading import RLock as _RLock
from locale import getlocale as _getlocale
from _regex_core import *
from _regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError,
_UnscopedFlagSet, _check_group_features, _compile_firstset,
_compile_replacement, _flatten_code, _fold_case, _get_required_string,
_parse_pattern, _shrink_cache)
from _regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source as
_Source, Fuzzy as _Fuzzy)
# Version 0 is the old behaviour, compatible with the original 're' module.
# Version 1 is the new behaviour, which differs slightly.
DEFAULT_VERSION = VERSION0
_METACHARS = frozenset("()[]{}?*+|^$\\.-#")
_regex_core.DEFAULT_VERSION = DEFAULT_VERSION
# Caches for the patterns and replacements.
_cache = {}
_cache_lock = _RLock()
_named_args = {}
_replacement_cache = {}
_locale_sensitive = {}
# Maximum size of the cache.
_MAXCACHE = 500
_MAXREPCACHE = 500
def _compile(pattern, flags=0, kwargs={}):
"Compiles a regular expression to a PatternObject."
# We won't bother to cache the pattern if we're debugging.
debugging = (flags & DEBUG) != 0
# What locale is this pattern using?
locale_key = (type(pattern), pattern)
if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0:
# This pattern is, or might be, locale-sensitive.
pattern_locale = _getlocale()[1]
else:
# This pattern is definitely not locale-sensitive.
pattern_locale = None
if not debugging:
try:
# Do we know what keyword arguments are needed?
args_key = pattern, type(pattern), flags
args_needed = _named_args[args_key]
# Are we being provided with its required keyword arguments?
args_supplied = set()
if args_needed:
for k, v in args_needed:
try:
args_supplied.add((k, frozenset(kwargs[k])))
except KeyError:
raise error("missing named list: {!r}".format(k))
args_supplied = frozenset(args_supplied)
# Have we already seen this regular expression and named list?
pattern_key = (pattern, type(pattern), flags, args_supplied,
DEFAULT_VERSION, pattern_locale)
return _cache[pattern_key]
except KeyError:
# It's a new pattern, or new named list for a known pattern.
pass
# Guess the encoding from the class of the pattern string.
if isinstance(pattern, str):
guess_encoding = UNICODE
elif isinstance(pattern, bytes):
guess_encoding = ASCII
elif isinstance(pattern, _pattern_type):
if flags:
raise ValueError("cannot process flags argument with a compiled pattern")
return pattern
else:
raise TypeError("first argument must be a string or compiled pattern")
# Set the default version in the core code in case it has been changed.
_regex_core.DEFAULT_VERSION = DEFAULT_VERSION
global_flags = flags
while True:
caught_exception = None
try:
source = _Source(pattern)
info = _Info(global_flags, source.char_type, kwargs)
info.guess_encoding = guess_encoding
source.ignore_space = bool(info.flags & VERBOSE)
parsed = _parse_pattern(source, info)
break
except _UnscopedFlagSet:
# Remember the global flags for the next attempt.
global_flags = info.global_flags
except error as e:
caught_exception = e
if caught_exception:
raise error(caught_exception.msg, caught_exception.pattern,
caught_exception.pos)
if not source.at_end():
raise error("unbalanced parenthesis", pattern, source.pos)
# Check the global flags for conflicts.
version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
if version not in (0, VERSION0, VERSION1):
raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE):
raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible")
if isinstance(pattern, bytes) and (info.flags & UNICODE):
raise ValueError("cannot use UNICODE flag with a bytes pattern")
if not (info.flags & _ALL_ENCODINGS):
if isinstance(pattern, str):
info.flags |= UNICODE
else:
info.flags |= ASCII
reverse = bool(info.flags & REVERSE)
fuzzy = isinstance(parsed, _Fuzzy)
    # Remember whether this pattern has an inline locale flag.
_locale_sensitive[locale_key] = info.inline_locale
# Fix the group references.
caught_exception = None
try:
parsed.fix_groups(pattern, reverse, False)
except error as e:
caught_exception = e
if caught_exception:
raise error(caught_exception.msg, caught_exception.pattern,
caught_exception.pos)
# Should we print the parsed pattern?
if flags & DEBUG:
parsed.dump(indent=0, reverse=reverse)
# Optimise the parsed pattern.
parsed = parsed.optimise(info, reverse)
parsed = parsed.pack_characters(info)
# Get the required string.
req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags)
# Build the named lists.
named_lists = {}
named_list_indexes = [None] * len(info.named_lists_used)
args_needed = set()
for key, index in info.named_lists_used.items():
name, case_flags = key
values = frozenset(kwargs[name])
if case_flags:
items = frozenset(_fold_case(info, v) for v in values)
else:
items = values
named_lists[name] = values
named_list_indexes[index] = items
args_needed.add((name, values))
# Check the features of the groups.
_check_group_features(info, parsed)
# Compile the parsed pattern. The result is a list of tuples.
code = parsed.compile(reverse)
# Is there a group call to the pattern as a whole?
key = (0, reverse, fuzzy)
ref = info.call_refs.get(key)
if ref is not None:
code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )]
# Add the final 'success' opcode.
code += [(_OP.SUCCESS, )]
# Compile the additional copies of the groups that we need.
for group, rev, fuz in info.additional_groups:
code += group.compile(rev, fuz)
# Flatten the code into a list of ints.
code = _flatten_code(code)
if not parsed.has_simple_start():
# Get the first set, if possible.
try:
fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
fs_code = _flatten_code(fs_code)
code = fs_code + code
except _FirstSetError:
pass
# The named capture groups.
index_group = dict((v, n) for n, v in info.group_index.items())
# Create the PatternObject.
#
# Local flags like IGNORECASE affect the code generation, but aren't needed
# by the PatternObject itself. Conversely, global flags like LOCALE _don't_
# affect the code generation but _are_ needed by the PatternObject.
compiled_pattern = _regex.compile(pattern, info.flags | version, code,
info.group_index, index_group, named_lists, named_list_indexes,
req_offset, req_chars, req_flags, info.group_count)
# Do we need to reduce the size of the cache?
if len(_cache) >= _MAXCACHE:
with _cache_lock:
_shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE)
if not debugging:
if (info.flags & LOCALE) == 0:
pattern_locale = None
args_needed = frozenset(args_needed)
# Store this regular expression and named list.
pattern_key = (pattern, type(pattern), flags, args_needed,
DEFAULT_VERSION, pattern_locale)
_cache[pattern_key] = compiled_pattern
# Store what keyword arguments are needed.
_named_args[args_key] = args_needed
return compiled_pattern
def _compile_replacement_helper(pattern, template):
"Compiles a replacement template."
# This function is called by the _regex module.
# Have we seen this before?
key = pattern.pattern, pattern.flags, template
compiled = _replacement_cache.get(key)
if compiled is not None:
return compiled
if len(_replacement_cache) >= _MAXREPCACHE:
_replacement_cache.clear()
is_unicode = isinstance(template, str)
source = _Source(template)
if is_unicode:
def make_string(char_codes):
return "".join(chr(c) for c in char_codes)
else:
def make_string(char_codes):
return bytes(char_codes)
compiled = []
literal = []
while True:
ch = source.get()
if not ch:
break
if ch == "\\":
# '_compile_replacement' will return either an int group reference
# or a string literal. It returns items (plural) in order to handle
# a 2-character literal (an invalid escape sequence).
is_group, items = _compile_replacement(source, pattern, is_unicode)
if is_group:
# It's a group, so first flush the literal.
if literal:
compiled.append(make_string(literal))
literal = []
compiled.extend(items)
else:
literal.extend(items)
else:
literal.append(ord(ch))
# Flush the literal.
if literal:
compiled.append(make_string(literal))
_replacement_cache[key] = compiled
return compiled
# We define _pattern_type here after all the support objects have been defined.
_pattern_type = type(_compile("", 0, {}))
# We'll define an alias for the 'compile' function so that the repr of a
# pattern object is eval-able.
Regex = compile
# Register myself for pickling.
import copyreg as _copy_reg
def _pickle(pattern):
return _regex.compile, pattern._pickled_data
_copy_reg.pickle(_pattern_type, _pickle)
|
the-stack_0_22592 | from __future__ import annotations
from contextlib import contextmanager
from .logger import Logger
from ..typing import StyleOptions
class LoggedMixin:
"""
A mixin class that adds common log methods and accessors for the logger
object to which those log methods delegate.
"""
_logger: Logger
def __init__(self, logger: Logger):
self._logger = logger
super().__init__()
# -- Logger Methods --------------- --- -- -
@property
def logger(self) -> Logger:
"""Get the logger object to which log methods are delegated."""
return self._logger
@logger.setter
def logger(self, logger: Logger) -> None:
"""Set the logger object to which log methods are delegated."""
self._logger = logger
@property
def debug_enabled(self) -> bool:
"""See :meth:`logger.debug_enabled <.Logger.debug_enabled>`."""
return self._logger.debug_enabled
@property
def trace_enabled(self) -> bool:
"""See :meth:`logger.trace_enabled <.Logger.trace_enabled>`."""
return self._logger.trace_enabled
@property
def info_enabled(self) -> bool:
"""See :meth:`logger.info_enabled <.Logger.info_enabled>`."""
return self._logger.info_enabled
def debug(self,
*msgs,
bullet: str = None,
indent: str = "",
key_style: StyleOptions = None,
margin: int = 0,
style: StyleOptions = None):
"""See :meth:`logger.debug <.Logger.debug>`."""
self._logger.debug(*msgs,
bullet=bullet,
indent=indent,
key_style=key_style,
margin=margin,
style=style)
def trace(self,
*msgs,
bullet: str = None,
indent: str = "",
key_style: StyleOptions = None,
margin: int = 0,
style: StyleOptions = None):
"""See :meth:`logger.trace <.Logger.trace>`."""
self._logger.trace(*msgs,
bullet=bullet,
indent=indent,
key_style=key_style,
margin=margin,
style=style)
def info(self,
*msgs,
bullet: str = None,
indent: str = "",
key_style: StyleOptions = None,
margin: int = 0,
style: StyleOptions = None):
"""See :meth:`logger.info <.Logger.info>`."""
self._logger.info(*msgs,
bullet=bullet,
indent=indent,
key_style=key_style,
margin=margin,
style=style)
@contextmanager
def indent(self):
"""
Increase the indentation for the subsequent log calls if the log level
is info or higher.
"""
try:
if self._logger.info_enabled:
with self._logger.indent():
yield
else:
yield
finally:
pass
@contextmanager
def debug_indent(self):
"""
Increase the indentation for the subsequent log calls if the log level
is debug or higher.
"""
try:
if self._logger.debug_enabled:
with self._logger.indent():
yield
else:
yield
finally:
pass
@contextmanager
def trace_indent(self):
"""
Increase the indentation for the subsequent log calls if the log level
is trace or higher.
"""
try:
if self._logger.trace_enabled:
with self._logger.indent():
yield
else:
yield
finally:
pass
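# Illustrative usage sketch (the Worker class is hypothetical, not part of this
# module); a subclass supplies a Logger at construction and then calls the
# delegating log methods directly:
#
#     class Worker(LoggedMixin):
#         def run(self):
#             self.info("starting")
#             with self.debug_indent():
#                 self.debug("details shown at the deeper indent level")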
|
the-stack_0_22593 | """
A module for analysis tools focused on determining the width of
spectral features.
"""
import numpy as np
from astropy.stats.funcs import gaussian_sigma_to_fwhm
from ..manipulation import extract_region
from . import centroid
from .utils import computation_wrapper
__all__ = ['gaussian_sigma_width', 'gaussian_fwhm', 'fwhm']
def gaussian_sigma_width(spectrum, regions=None):
"""
Estimate the width of the spectrum using a second-moment analysis.
The value is scaled to match the sigma/standard deviation parameter of a
standard Gaussian profile. This will be calculated over the regions, if
they are specified.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object over which the width will be calculated.
regions: `~specutils.utils.SpectralRegion` or list of `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the gaussian sigma width. If
regions is `None`, computation is performed over entire spectrum.
Returns
-------
approx_sigma: `~astropy.units.Quantity` or list (based on region input)
Approximated sigma value of the spectrum
Notes
-----
The spectrum should be continuum subtracted before being passed to this
function.
"""
return computation_wrapper(_compute_gaussian_sigma_width, spectrum, regions)
def gaussian_fwhm(spectrum, regions=None):
"""
Estimate the width of the spectrum using a second-moment analysis.
The value is scaled to match the full width at half max of a standard
Gaussian profile. This will be calculated over the regions, if they are
specified.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
        The spectrum object over which the width will be calculated.
regions: `~specutils.utils.SpectralRegion` or list of `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the FWHM value. If regions is
`None`, computation is performed over entire spectrum.
Returns
-------
gaussian_fwhm : `~astropy.units.Quantity` or list (based on region input)
Approximate full width of the signal at half max
Notes
-----
The spectrum should be continuum subtracted before being passed to this
function.
"""
return computation_wrapper(_compute_gaussian_fwhm, spectrum, regions)
def fwhm(spectrum, regions=None):
"""
Compute the true full width half max of the spectrum.
This makes no assumptions about the shape of the spectrum (e.g. whether it
is Gaussian). It finds the maximum of the spectrum, and then locates the
point closest to half max on either side of the maximum, and
measures the distance between them. This will be calculated over the
regions, if they are specified.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object over which the width will be calculated.
regions: `~specutils.utils.SpectralRegion` or list of `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the FWHM value. If regions is
`None`, computation is performed over entire spectrum.
Returns
-------
    fwhm : `~astropy.units.Quantity` or list (based on region input)
Full width of the signal at half max
Notes
-----
The spectrum should be continuum subtracted before being passed to this
function.
"""
return computation_wrapper(_compute_fwhm, spectrum, regions)
def _compute_gaussian_fwhm(spectrum, regions=None):
"""
This is a helper function for the above `gaussian_fwhm()` method.
"""
fwhm = _compute_gaussian_sigma_width(spectrum, regions) * gaussian_sigma_to_fwhm
return fwhm
def _compute_gaussian_sigma_width(spectrum, regions=None):
"""
This is a helper function for the above `gaussian_sigma_width()` method.
"""
if regions is not None:
calc_spectrum = extract_region(spectrum, regions)
else:
calc_spectrum = spectrum
flux = calc_spectrum.flux
spectral_axis = calc_spectrum.spectral_axis
centroid_result = centroid(spectrum, regions)
if flux.ndim > 1:
spectral_axis = np.broadcast_to(spectral_axis, flux.shape, subok=True)
centroid_result = centroid_result[:, np.newaxis]
dx = spectral_axis - centroid_result
sigma = np.sqrt(np.sum((dx * dx) * flux, axis=-1) / np.sum(flux, axis=-1))
return sigma
def _compute_single_fwhm(flux, spectral_axis):
argmax = np.argmax(flux)
halfval = flux[argmax] / 2
left = flux[:argmax] <= halfval
right = flux[argmax+1:] <= halfval
l_idx = np.where(left == True)[0][-1]
r_idx = np.where(right == True)[0][0] + argmax
return spectral_axis[r_idx] - spectral_axis[l_idx]
def _compute_fwhm(spectrum, regions=None):
"""
This is a helper function for the above `fwhm()` method.
"""
if regions is not None:
calc_spectrum = extract_region(spectrum, regions)
else:
calc_spectrum = spectrum
flux = calc_spectrum.flux
spectral_axis = calc_spectrum.spectral_axis
if flux.ndim > 1:
return [_compute_single_fwhm(x, spectral_axis) for x in flux]
else:
return _compute_single_fwhm(flux, spectral_axis)
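# Illustrative usage sketch (assumes a continuum-subtracted Spectrum1D named "spec"):
#
#     from specutils.analysis import gaussian_sigma_width, gaussian_fwhm, fwhm
#     sigma_width = gaussian_sigma_width(spec)   # second-moment width, sigma-scaled
#     gauss_width = gaussian_fwhm(spec)          # sigma scaled to a Gaussian FWHM
#     true_width = fwhm(spec)                    # empirical full width at half maximum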
|
the-stack_0_22594 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.template.base import Library, TemplateSyntaxError, kwarg_re, Node
from django.utils.encoding import smart_text
from django.core.urlresolvers import NoReverseMatch
from .. import reverse_i18n
register = Library()
class URLNode(Node):
def __init__(self, view_name, language, args, kwargs, asvar):
self.view_name = view_name
self.language = language
self.args = args
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
args = [arg.resolve(context) for arg in self.args]
kwargs = dict([(smart_text(k, 'ascii'), v.resolve(context))
for k, v in self.kwargs.items()])
view_name = self.view_name.resolve(context)
language = self.language.resolve(context)
if not view_name:
raise NoReverseMatch(
"'url' requires a non-empty first argument. "
"The syntax changed in Django 1.5, see the docs.")
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse_i18n(view_name, language,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch as e:
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse_i18n(project_name + '.' + view_name,
language,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
raise e
else:
if self.asvar is None:
raise e
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
@register.tag
def i18nurl(parser, token):
"""
Returns an absolute URL matching given view with its parameters in
the good language.
{% i18nurl "path.to.some_view" "language" arg1 arg2 %}
or
{% i18nurl "path.to.some_view" "language" name1=value1 name2=value2 %}
"""
bits = token.split_contents()
if len(bits) < 3:
raise TemplateSyntaxError("'%s' takes at least two argument"
" (path to a view and language)" % bits[0])
try:
viewname = parser.compile_filter(bits[1])
except TemplateSyntaxError as exc:
exc.args = (exc.args[0] + ". The syntax of 'url' "
"changed in Django 1.5, see the docs."),
raise
language = parser.compile_filter(bits[2])
args = []
kwargs = {}
asvar = None
bits = bits[3:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, language, args, kwargs, asvar)
class CurrentURLNode(Node):
def __init__(self, language, asvar):
self.language = language
self.asvar = asvar
def render(self, context):
try:
resolver_match = context['request'].resolver_match
except AttributeError:
return ''
app_name = resolver_match.app_name
url_name = resolver_match.url_name
args = resolver_match.args
kwargs = resolver_match.kwargs
language = self.language.resolve(context)
if app_name:
view_name = '{app_name}:{url_name}'.format(app_name=app_name,
url_name=url_name)
else:
view_name = '{url_name}'.format(url_name=url_name)
# Try to look up the URL twice: once given the view name, and again
# relative to what we guess is the "main" app. If they both fail,
# re-raise the NoReverseMatch unless we're using the
# {% url ... as var %} construct in which case return nothing.
url = ''
try:
url = reverse_i18n(view_name, language,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch as e:
if settings.SETTINGS_MODULE:
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse_i18n(project_name + '.' + view_name,
language,
args=args, kwargs=kwargs,
current_app=context.current_app)
except NoReverseMatch:
if self.asvar is None:
# Re-raise the original exception, not the one with
# the path relative to the project. This makes a
# better error message.
raise e
else:
if self.asvar is None:
raise e
if self.asvar:
context[self.asvar] = url
return ''
else:
return url
@register.tag
def current_i18nurl(parser, token):
"""
Returns the current page absolute url in the right language.
{% current_i18nurl "language" %}
"""
bits = token.split_contents()
asvar = None
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes one argument"
" (language)" % bits[0])
language = parser.compile_filter(bits[1])
    bits = bits[2:]  # keep only what follows the tag name and the language argument
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
return CurrentURLNode(language, asvar)
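# Illustrative template usage (a sketch; the view name, language codes and variable
# names are placeholders, and it assumes this templatetag module is loaded under the
# name "i18nurl"):
#
#     {% load i18nurl %}
#     {% i18nurl "app:detail" "de" pk=object.pk as german_url %}
#     {% current_i18nurl "fr" as french_url %}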
|
the-stack_0_22595 | # -*- coding: utf-8 -*-
import os
import sys
import optparse
import subprocess
#import traci
import numpy as np
#from ControlNs3 import Vehicle_Enter_Network2
from function import Vehicle_Enter_Network
from function import Create_Vehicle
from function import Vehicle_Information
from function import Threshold_Of_Vehicle_Charging
from function import Vehicle_Charging
from function import Charging_Station_Parameter
# =============================================================================
# from function import Print_Vehicle_information
# =============================================================================
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary # noqa
import traci
from ControlNs3 import Ns3LteSimulation
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
def get_options():
optParser = optparse.OptionParser()
optParser.add_option("--nogui", action="store_true",
default=False, help="run the commandline version of sumo")
options, args = optParser.parse_args()
return options
# =============================================================================
# <parkingArea id="CS04" lane="536831320#1_0" startPos="5" endPos="23" roadsideCapacity="3" width="5" length="6" angle="0"/>
# =============================================================================
ChargingStation = {}
Vehicle = {}
VehilceDensity = 1
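# Module-level simulation state (comment added for clarity): ChargingStation maps
# charging-station ids to their parameters, Vehicle maps vehicle ids to their state,
# and VehilceDensity is the Poisson rate used when creating vehicles each second
# (interpretation based on the Create_Vehicle call below).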
def run():
global Vehicle
global CS
#-------------------------for storing trajectory
#----------------------------------------------
Charging_Station_Parameter(ChargingStation)
msTimer = traci.simulation.getCurrentTime()
while True:
traci.simulationStep()
        # Multi-vehicle test
print("#############################################################")
getCurrentTime = traci.simulation.getCurrentTime()
print("getCurrentTime = ",getCurrentTime)
        if (msTimer % 1000) == 0: # create device to network (add a vehicle every 1 second, Poisson λ=1 k=1)
Create_Vehicle(Vehicle, VehilceDensity)
msTimer = traci.simulation.getCurrentTime()
DepartVeh = traci.simulation.getDepartedIDList()
if len(DepartVeh) > 0:
Vehicle_Enter_Network(DepartVeh, Vehicle)
#Vehicle_Enter_Network2()
ArrivedVeh = traci.simulation.getArrivedIDList()
if ArrivedVeh:
for VehName in ArrivedVeh:
del Vehicle[VehName]
if len(Vehicle) > 0:
getIDList = traci.vehicle.getIDList()
print("getIDList = ",getIDList)
Vehicle_Information(Vehicle)
#Vehicle_Information2()
            # Get vehicle information
            # =============================================================================
            # Print_Vehicle_information(Vehicle)  # write vehicle data to route.csv
            # =============================================================================
            Threshold_Of_Vehicle_Charging(Vehicle,ChargingStation)  # detect whether a vehicle needs charging and find the best charging station
            Vehicle_Charging(Vehicle,ChargingStation)  # for vehicles currently charging: which CS are they at and how much longer do they need?
print("#############################################################")
#=================================================================================
        Ns3LteSimulation(simTime=50, ulpacketSize=512)  # function for controlling ns-3
#=================================================================================
ArrivedVeh = traci.simulation.getArrivedIDList()
if ArrivedVeh:
print("The Car is leaving = " , ArrivedVeh)
sys.stdout.flush()
traci.close()
# this is the main entry point of this script
if __name__ == "__main__":
options = get_options()
if options.nogui:
sumoBinary = checkBinary('sumo')
else:
sumoBinary = checkBinary('sumo-gui')
traci.start([sumoBinary, '-c', os.path.join('data/Simulation', 'map.sumocfg')])
#BatteryTimer = threading.Timer(1, Battery_Timer)
#BatteryTimer.start()
run()
#BatteryTimer.cancel()
|
the-stack_0_22596 | import os
import time
from buildworker_scripts.utils.logutils import Log
import boto3
import botocore
import botocore.exceptions
class S3Session(object):
def __init__(self, logger=None, bucket=None):
self.s3client = None
if logger is None:
self.log = Log(name=__name__)
else:
self.log = logger
self.bucket = bucket
self.makeclient()
def makeclient(self):
self.s3client = boto3.Session().client('s3')
def upload(self, Filename, Key):
if self.s3client is None:
self.makeclient()
try:
self.s3client.upload_file(Bucket=self.bucket, Key=Key, Filename=Filename)
except botocore.exceptions.ClientError as e:
err = e.response['Error']
self.log.warn("{}/{}: {} {}".format(self.bucket, Key, err['Code'], err['Message']))
return False
return True
def download(self, Key, Filename, quiet=True):
if self.s3client is None:
self.makeclient()
try:
info = self.s3client.head_object(Bucket=self.bucket, Key=Key)
self.s3client.download_file(Bucket=self.bucket, Key=Key, Filename=Filename)
if 'LastModified' in info:
mtime = int(time.mktime(info['LastModified'].timetuple()))
os.utime(Filename, (mtime, mtime))
except botocore.exceptions.ClientError as e:
err = e.response['Error']
if quiet and err['Code'] == "404":
self.log.debug(2, "not found: {}/{}".format(self.bucket, Key))
else:
self.log.warn("{}/{}: {} {}".format(self.bucket, Key, err['Code'], err['Message']))
return False
except OSError as e:
if quiet:
pass
self.log.warn("os.utime({}): {} (errno {})".format(Filename, e.strerror, e.errno))
return False
return True
def get_object_info(self, Key, quiet=True):
if self.s3client is None:
self.makeclient()
try:
info = self.s3client.head_object(Bucket=self.bucket, Key=Key)
except botocore.exceptions.ClientError as e:
err = e.response['Error']
if quiet and err['Code'] == "404":
self.log.debug(2, "not found: {}/{}".format(self.bucket, Key))
else:
self.log.warn("{}/{}: {} {}".format(self.bucket, Key, err['Code'], err['Message']))
return None
return info
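# Illustrative usage sketch (assumes boto3 credentials are configured; bucket and key
# names are placeholders):
#
#     s3 = S3Session(bucket="my-bucket")
#     if s3.upload(Filename="out/build.log", Key="logs/build.log"):
#         s3.download(Key="logs/build.log", Filename="/tmp/build.log")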
|
the-stack_0_22598 | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v1/version.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from protoc_gen_swagger.options import annotations_pb2 as protoc__gen__swagger_dot_options_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v1/version.proto',
package='v1',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x10v1/version.proto\x12\x02v1\x1a\x1cgoogle/api/annotations.proto\x1a,protoc-gen-swagger/options/annotations.proto\"6\n\x07Version\x12\x13\n\x0bmin_version\x18\x01 \x01(\t\x12\x16\n\x0elatest_version\x18\x02 \x01(\t\"y\n\x08Versions\x12\x18\n\x10platform_version\x18\x01 \x01(\t\x12\x18\n\x03\x63li\x18\x02 \x01(\x0b\x32\x0b.v1.Version\x12\x1d\n\x08platform\x18\x03 \x01(\x0b\x32\x0b.v1.Version\x12\x1a\n\x05\x61gent\x18\x04 \x01(\x0b\x32\x0b.v1.Version\".\n\nLogHandler\x12\x0b\n\x03\x64sn\x18\x01 \x01(\t\x12\x13\n\x0b\x65nvironment\x18\x02 \x01(\tb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,protoc__gen__swagger_dot_options_dot_annotations__pb2.DESCRIPTOR,])
_VERSION = _descriptor.Descriptor(
name='Version',
full_name='v1.Version',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_version', full_name='v1.Version.min_version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='latest_version', full_name='v1.Version.latest_version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=154,
)
_VERSIONS = _descriptor.Descriptor(
name='Versions',
full_name='v1.Versions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform_version', full_name='v1.Versions.platform_version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cli', full_name='v1.Versions.cli', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='platform', full_name='v1.Versions.platform', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agent', full_name='v1.Versions.agent', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=277,
)
_LOGHANDLER = _descriptor.Descriptor(
name='LogHandler',
full_name='v1.LogHandler',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dsn', full_name='v1.LogHandler.dsn', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='environment', full_name='v1.LogHandler.environment', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=279,
serialized_end=325,
)
_VERSIONS.fields_by_name['cli'].message_type = _VERSION
_VERSIONS.fields_by_name['platform'].message_type = _VERSION
_VERSIONS.fields_by_name['agent'].message_type = _VERSION
DESCRIPTOR.message_types_by_name['Version'] = _VERSION
DESCRIPTOR.message_types_by_name['Versions'] = _VERSIONS
DESCRIPTOR.message_types_by_name['LogHandler'] = _LOGHANDLER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Version = _reflection.GeneratedProtocolMessageType('Version', (_message.Message,), {
'DESCRIPTOR' : _VERSION,
'__module__' : 'v1.version_pb2'
# @@protoc_insertion_point(class_scope:v1.Version)
})
_sym_db.RegisterMessage(Version)
Versions = _reflection.GeneratedProtocolMessageType('Versions', (_message.Message,), {
'DESCRIPTOR' : _VERSIONS,
'__module__' : 'v1.version_pb2'
# @@protoc_insertion_point(class_scope:v1.Versions)
})
_sym_db.RegisterMessage(Versions)
LogHandler = _reflection.GeneratedProtocolMessageType('LogHandler', (_message.Message,), {
'DESCRIPTOR' : _LOGHANDLER,
'__module__' : 'v1.version_pb2'
# @@protoc_insertion_point(class_scope:v1.LogHandler)
})
_sym_db.RegisterMessage(LogHandler)
# @@protoc_insertion_point(module_scope)
|
the-stack_0_22601 | from flask_app.core.exceptions.generic import Error
class ImageNotFoundException(Error):
def __init__(self,name,message=None):
        if message is None:
            message = f"Container image '{name}' does not exist on disk. If it's there, check that there is a Dockerfile in its root directory."
self.message = message
super().__init__(self.message)
class NetworkNameTakenException(Error):
def __init__(self,name,message=None):
        if message is None:
message= f"Network name '{name}' is already taken."
self.message = message
super().__init__(self.message)
class InvalidContainerNameException(Error):
def __init__(self,name,message=None):
        if message is None:
message= f"Invalid container name \"{ name }\". It must match [a-zA-Z0-9_.-]*"
self.message = message
super().__init__(self.message)
class InvalidNetworkNameException(Error):
def __init__(self,name,message=None):
        if message is None:
message= f"Invalid network name \"{ name }\". It must match [a-zA-Z0-9_.-]*"
self.message = message
super().__init__(self.message)
class NoPortsAvailableException(Error):
def __init__(self,message=None):
        if message is None:
message= f"All ports are used, no port available."
self.message = message
super().__init__(self.message)
|
the-stack_0_22602 | # Copyright 2022, Lefebvre Dalloz Services
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command line args parser
"""
import argparse
from typing import List
def parse_args(commands: List[str] = None) -> argparse.Namespace:
"""
Parse command line arguments
    :param commands: to provide the command line programmatically
:return: parsed command line
"""
parser = argparse.ArgumentParser(
description="optimize and deploy transformers", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("-m", "--model", required=True, help="path to model or URL to Hugging Face hub")
parser.add_argument("-t", "--tokenizer", help="path to tokenizer or URL to Hugging Face hub")
parser.add_argument(
"--task",
default="classification",
choices=["classification", "embedding", "text-generation"],
help="task to manage. embeddings is for sentence-transformers models",
)
parser.add_argument(
"--auth-token",
default=None,
help=(
"Hugging Face Hub auth token. Set to `None` (default) for public models. "
"For private models, use `True` to use local cached token, or a string of your HF API token"
),
)
parser.add_argument(
"-b",
"--batch-size",
default=[1, 1, 1],
help="batch sizes to optimize for (min, optimal, max). Used by TensorRT and benchmarks.",
type=int,
nargs=3,
)
parser.add_argument(
"-s",
"--seq-len",
default=[16, 16, 16],
help="sequence lengths to optimize for (min, optimal, max). Used by TensorRT and benchmarks.",
type=int,
nargs=3,
)
parser.add_argument("-q", "--quantization", action="store_true", help="INT-8 GPU quantization support")
parser.add_argument("-w", "--workspace-size", default=10000, help="workspace size in MiB (TensorRT)", type=int)
parser.add_argument("-o", "--output", default="triton_models", help="name to be used for ")
parser.add_argument("-n", "--name", default="transformer", help="model name to be used in triton server")
parser.add_argument("-v", "--verbose", action="store_true", help="display detailed information")
parser.add_argument(
"--backend",
default=["onnx"],
help="backend to use. multiple args accepted.",
nargs="*",
choices=["onnx", "tensorrt"],
)
parser.add_argument(
"-d",
"--device",
default=None,
help="device to use. If not set, will be cuda if available.",
choices=["cpu", "cuda"],
)
parser.add_argument("--nb-threads", default=1, help="# of CPU threads to use for inference", type=int)
parser.add_argument(
"--nb-instances", default=1, help="# of model instances, may improve throughput (Triton)", type=int
)
parser.add_argument("--warmup", default=10, help="# of inferences to warm each model", type=int)
parser.add_argument("--nb-measures", default=1000, help="# of inferences for benchmarks", type=int)
parser.add_argument("--seed", default=123, help="seed for random inputs, etc.", type=int)
parser.add_argument("--atol", default=3e-1, help="tolerance when comparing outputs to Pytorch ones", type=float)
args, _ = parser.parse_known_args(args=commands)
return args
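# Illustrative usage sketch (the model path below is a placeholder):
#
#     args = parse_args(["-m", "path/to/model", "--task", "classification",
#                        "--backend", "onnx", "tensorrt"])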
|
the-stack_0_22603 | import argparse
import yaml
import os
from classifier.action_classifier import ActionClassifier
from datasets.dataset_utils import get_class_labels
from spatial_transforms import *
from visualization.plotters import ResultPlotter
from visualization.visualizer import SyncVideoVisualizer, TopKVisualizer, FPSVisualizer, ClassifiedClassVisualizer
from utils.video_manager import SyncVideoManager
from online.online_opts import *
from online.online_utils import FPSMeasurer
from classifier.action_activation import ActionActivator
from opts import parse_input, parse_model
from online.online_opts import parse_source, parse_dataset, parse_preprocessing, parse_online
DISPLAY_SCALE = 600
def parse_args():
parser = argparse.ArgumentParser()
parse_source(parser)
parse_online(parser)
parse_activator(parser)
parse_preprocessing(parser)
parse_model(parser)
parse_input(parser)
parser.add_argument('--output_file', type=str, default="", help='Leave empty for no output video')
parser.add_argument('--plot', action="store_true", default=False, help="Plotting in real time")
parser.add_argument('--dataset_config', type=str, default="annotation_Jester\Jester.yaml", help="Path to the dataset config")
args = parser.parse_args()
parse_dataset(args)
return args
def main():
opts = parse_args()
class_map = get_class_labels(opts.categories_path)
spatial_transforms = Compose([
CV2ToPIL("BGR"),
Scale(opts.smaller_dimension_size),
CenterCrop(opts.center_crop_size),
ToTensor(1),
Normalize([114.7748, 107.7354, 99.475], [1, 1, 1])
])
sequence_length = opts.sample_duration * opts.downsample
opts.n_classes = len(class_map)
classifier = ActionClassifier(opts=opts)
if opts.source == "camera":
video_capturer = SyncVideoManager(source=opts.camera_index,
sequence_length=sequence_length,
num_frames_skip=opts.skip_frames,
spatial_transforms=spatial_transforms)
elif opts.source == "video":
video_capturer = SyncVideoManager(source=opts.video_path,
sequence_length=sequence_length,
num_frames_skip=opts.skip_frames,
spatial_transforms=spatial_transforms)
else:
raise ValueError("Invalid source")
if opts.plot:
        plotter = ResultPlotter(opts.n_classes, prediction_names=("Contrast classes", "Predictions", "Filtered", "Weighted", "Cumulative sum"),
x_size=100)
topK_visualizer = TopKVisualizer(class_map,
top_k=5)
fps_display_visualizer = FPSVisualizer(y_position=25, fps_name="Display FPS", color=(255, 255, 0))
fps_model_visualizer = FPSVisualizer(y_position=50, fps_name="Model frequency", color=(255, 0, 0))
class_visualizer = ClassifiedClassVisualizer(class_map, y_position=85)
image_visualizers = [topK_visualizer, fps_display_visualizer, fps_model_visualizer, class_visualizer]
display_spatial_transforms = Compose([
ScaleCV2(DISPLAY_SCALE),
FLipCV2(1)
])
video_visualizer = SyncVideoVisualizer(image_visualizers, display_spatial_transforms)
activation_processing = ActionActivator(opts, opts.contrast_class_indices)
if opts.output_file:
video_capturer.capture_stream()
frame = video_capturer.read_frame()
disp_frame = display_spatial_transforms(frame)
h, w, c = disp_frame.shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_name = opts.output_file
video_path = "result_videos/" + video_name +".avi"
out = cv2.VideoWriter(video_path, fourcc, 12, (w, h))
fps_display_measurer = FPSMeasurer()
fps_model_measurer = FPSMeasurer()
while video_capturer.capture_stream():
clip = video_capturer.read_clip()
if clip:
prediction = classifier(clip)
activated_class = activation_processing(prediction)
classifier_state = {
"predictions": prediction,
"activated_class": activated_class
}
video_visualizer.update_state(classifier_state)
fps_model_measurer.operation_complete()
fps_model_visualizer.update_fps(fps_model_measurer.fps())
if opts.plot:
                plotter({
                    "Contrast classes": activation_processing.contrast_probabilities,
                    "Predictions": prediction,
                    "Filtered": activation_processing.filtered_probabilities,
                    "Weighted": activation_processing.weighted_probability,
                    "Cumulative sum": activation_processing.cum_sum
                }, activated_class)
frame = video_capturer.read_frame()
result_frame = video_visualizer.display(frame)
if result_frame is False:
# not really intuitive but the kill switch of the program is in the visualizer
# by pressing q
break
if opts.output_file:
out.write(result_frame)
fps_display_measurer.operation_complete()
fps_display_visualizer.update_fps(fps_display_measurer.fps())
if __name__ == "__main__":
main() |
the-stack_0_22607 | #!/usr/bin/env python
"""
_LoadDBSFilesByDAS_
MySQL implementation of LoadDBSFilesByDAS
"""
from WMCore.Database.DBFormatter import DBFormatter
class LoadDBSFilesByDAS(DBFormatter):
fileInfoSQL = """SELECT dbsbuffer_file.id AS id,
dbsbuffer_file.lfn AS lfn,
dbsbuffer_file.filesize AS filesize,
dbsbuffer_file.events AS events,
dbsbuffer_file.status AS status,
dbsbuffer_file.block_id AS block,
dbsbuffer_algo.app_name AS app_name,
dbsbuffer_algo.app_ver AS app_ver,
dbsbuffer_algo.app_fam AS app_fam,
dbsbuffer_algo.pset_hash AS pset_hash,
dbsbuffer_algo.config_content,
dbsbuffer_dataset.path AS dataset_path,
dbsbuffer_dataset.global_tag AS global_tag,
dbsbuffer_dataset.prep_id AS prep_id,
dbsbuffer_workflow.name AS workflow,
dbsbuffer_workflow.block_close_max_wait_time,
dbsbuffer_workflow.block_close_max_files,
dbsbuffer_workflow.block_close_max_events,
dbsbuffer_workflow.block_close_max_size
FROM dbsbuffer_file
INNER JOIN dbsbuffer_algo_dataset_assoc ON
dbsbuffer_file.dataset_algo = dbsbuffer_algo_dataset_assoc.id
INNER JOIN dbsbuffer_algo ON
dbsbuffer_algo_dataset_assoc.algo_id = dbsbuffer_algo.id
INNER JOIN dbsbuffer_dataset ON
dbsbuffer_algo_dataset_assoc.dataset_id = dbsbuffer_dataset.id AND
dbsbuffer_dataset.path = :datasetpath
INNER JOIN dbsbuffer_workflow ON
dbsbuffer_workflow.id = dbsbuffer_file.workflow
WHERE dbsbuffer_file.status = 'NOTUPLOADED'
AND NOT EXISTS ( SELECT *
FROM dbsbuffer_file_parent
INNER JOIN dbsbuffer_file parent_file ON
parent_file.id = dbsbuffer_file_parent.parent AND
parent_file.status = 'NOTUPLOADED'
WHERE dbsbuffer_file_parent.child = dbsbuffer_file.id )
ORDER BY dbsbuffer_file.id
"""
getLocationSQL = """SELECT dbsbuffer_location.pnn as location, dbsbuffer_file.id as id
FROM dbsbuffer_location
INNER JOIN dbsbuffer_file_location dfl ON dfl.location = dbsbuffer_location.id
INNER JOIN dbsbuffer_file ON dbsbuffer_file.id = dfl.filename
WHERE dbsbuffer_file.id = :fileid"""
getChecksumSQL = """SELECT cst.type AS cktype, fcs.cksum AS cksum, fcs.fileid AS id FROM
dbsbuffer_file_checksums fcs INNER JOIN
dbsbuffer_checksum_type cst ON fcs.typeid = cst.id
WHERE fcs.fileid = :fileid"""
getRunLumiSQL = """SELECT flr.run AS run, flr.lumi AS lumi, flr.num_events AS num_events, dbsbuffer_file.id AS id
FROM dbsbuffer_file_runlumi_map flr
INNER JOIN dbsbuffer_file ON dbsbuffer_file.id = flr.filename
WHERE dbsbuffer_file.id = :fileid"""
getParentLFNSQL = """SELECT dbfa.lfn AS lfn, dbfb.id AS id FROM dbsbuffer_file dbfa
INNER JOIN dbsbuffer_file_parent dfp ON dfp.parent = dbfa.id
INNER JOIN dbsbuffer_file dbfb ON dfp.child = dbfb.id
WHERE dbfb.id = :fileid """
def formatFileInfo(self, result):
"""
_formatFileInfo_
Some databases (Oracle) aren't case sensitive with respect to column
names so we'll do some formatting so the column names are returned as
expected.
"""
resultList = self.formatDict(result)
for resultDict in resultList:
resultDict["appName"] = resultDict["app_name"]
del resultDict["app_name"]
resultDict["appVer"] = resultDict["app_ver"]
del resultDict["app_ver"]
resultDict["appFam"] = resultDict["app_fam"]
del resultDict["app_fam"]
resultDict["psetHash"] = resultDict["pset_hash"]
del resultDict["pset_hash"]
resultDict["configContent"] = resultDict["config_content"]
del resultDict["config_content"]
resultDict["datasetPath"] = resultDict["dataset_path"]
del resultDict["dataset_path"]
resultDict["size"] = resultDict["filesize"]
del resultDict["filesize"]
resultDict["globalTag"] = resultDict['global_tag']
del resultDict['global_tag']
return resultList
def locInfo(self, result):
"""
Format the location info so that it matches
"""
resultList = self.formatDict(result)
interimDictionary = {}
for entry in resultList:
if entry['id'] not in interimDictionary.keys():
interimDictionary[entry['id']] = set()
interimDictionary[entry['id']].add(entry['location'])
finalList = []
for entry in interimDictionary.keys():
tmpDict = {'id': entry, 'locations': interimDictionary[entry]}
finalList.append(tmpDict)
return finalList
def ckInfo(self, result):
"""
Assemble the checksums into the appropriate format.
"""
resultList = self.formatDict(result)
interimDictionary = {}
for entry in resultList:
if entry['id'] not in interimDictionary.keys():
interimDictionary[entry['id']] = {}
interimDictionary[entry['id']][entry['cktype']] = entry['cksum']
finalList = []
for entry in interimDictionary.keys():
tmpDict = {'id': entry, 'checksums': interimDictionary[entry]}
finalList.append(tmpDict)
return finalList
def runInfo(self, result):
"""
Assemble runLumis into the appropriate format
"""
resultList = self.formatDict(result)
interimDictionary = {}
for entry in resultList:
if entry['id'] not in interimDictionary.keys():
interimDictionary[entry['id']] = {}
if entry['run'] not in interimDictionary[entry['id']].keys():
interimDictionary[entry['id']][entry['run']] = []
interimDictionary[entry['id']][entry['run']].append((entry['lumi'], entry["num_events"]))
finalList = []
for entry in interimDictionary.keys():
tmpDict = {'id': entry, 'runInfo': interimDictionary[entry]}
finalList.append(tmpDict)
return finalList
def parentInfo(self, result):
"""
        Format the parent info by grouping parent LFNs under each child file ID.
"""
resultList = self.formatDict(result)
interimDictionary = {}
for entry in resultList:
if entry['id'] not in interimDictionary.keys():
interimDictionary[entry['id']] = []
interimDictionary[entry['id']].append(entry['lfn'])
finalList = []
for entry in interimDictionary.keys():
tmpDict = {'id': entry, 'parentLFNs': interimDictionary[entry]}
finalList.append(tmpDict)
return finalList
def getBinds(self, files):
binds = []
files = self.dbi.makelist(files)
for f in files:
binds.append({'fileid': f})
return binds
def execute(self, datasetpath, conn = None, transaction = False):
"""
Execute multiple SQL queries to extract all binding information
Use the first query to get the fileIDs
"""
result = self.dbi.processData(self.fileInfoSQL,
{ 'datasetpath' : datasetpath },
conn = conn,
transaction = transaction)
fileInfo = self.formatFileInfo(result)
fileIDs = [x['id'] for x in fileInfo]
binds = self.getBinds(fileIDs)
if len(fileInfo) == 0:
# Then we have no files for this DAS
return []
# Do locations
result = self.dbi.processData(self.getLocationSQL, binds,
conn = conn,
transaction = transaction)
locInfo = self.locInfo(result)
fullResults = self.merge(fileInfo, locInfo)
# Do checksums
result = self.dbi.processData(self.getChecksumSQL, binds,
conn = conn,
transaction = transaction)
ckInfo = self.ckInfo(result)
fullResults = self.merge(fullResults, ckInfo)
# Do runLumi
result = self.dbi.processData(self.getRunLumiSQL, binds,
conn = conn,
transaction = transaction)
runInfo = self.runInfo(result)
fullResults = self.merge(fullResults, runInfo)
# Do parents
result = self.dbi.processData(self.getParentLFNSQL, binds,
conn = conn,
transaction = transaction)
parInfo = self.parentInfo(result)
fullResults = self.merge(fullResults, parInfo)
return fullResults
def merge(self, listA, listB, field = 'id'):
"""
_merge_
Merge together two file lists based on the ID field
"""
for entryA in listA:
for entryB in listB:
if entryA[field] == entryB[field]:
# Then we've found a match
entryA.update(entryB)
break
return listA
def groupByID(self, inputList, key):
"""
Group all the entries in a list of dictionaries together by ID
"""
interimDictionary = {}
for entry in inputList:
if entry['id'] not in interimDictionary.keys():
interimDictionary[entry['id']] = set()
interimDictionary[entry['id']].add(entry[key])
finalList = []
for entry in interimDictionary.keys():
tmpDict = {'id': entry, key: interimDictionary[entry]}
finalList.append(tmpDict)
return finalList
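
# Illustrative sketch (not part of the original DAO class above): locInfo(),
# ckInfo(), runInfo() and parentInfo() all group per-file rows by their 'id'
# column, and merge() then folds those grouped dicts back into the main file
# list. The guarded demo below replays that pattern on hand-written sample
# rows; the file IDs, LFNs and site names are made-up values.
if __name__ == '__main__':
    files = [{'id': 1, 'lfn': '/store/a.root'}, {'id': 2, 'lfn': '/store/b.root'}]
    locRows = [{'id': 1, 'location': 'T1_US_FNAL_Disk'}, {'id': 1, 'location': 'T2_CH_CERN'}]

    # Group the location rows by file id, as locInfo() does.
    grouped = {}
    for row in locRows:
        grouped.setdefault(row['id'], set()).add(row['location'])
    locInfo = [{'id': fid, 'locations': locs} for fid, locs in grouped.items()]

    # Fold the grouped info back into the file list, as merge() does.
    for entryA in files:
        for entryB in locInfo:
            if entryA['id'] == entryB['id']:
                entryA.update(entryB)
                break
    print(files)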
|
the-stack_0_22608 | # Usage: `local_testing_tool.py test_number`, where the argument test_number
# is 0 (Test Set 1), 1 (Test Set 2) or 2 (Test Set 3).
import sys
import random
T = 50
Ns = (10, 50, 50)
Qs = (30000, 30000, 17000)
CORRECT, WRONG = 1, -1
def GenCase(n):
r = list(range(1, n + 1))
random.shuffle(r)
return tuple(r)
def GenCases(n):
return tuple(GenCase(n) for _ in range(T))
class Error(Exception):
pass
class JudgeError(Exception):
pass
INVALID_LINE_ERROR = "Couldn't read a valid line"
TOO_LONG_LINE_ERROR = "Line too long: {} characters".format
WRONG_NUM_TOKENS_ERROR = "Wrong number of tokens, expected 3 or {} got {}".format
NOT_INTEGER_ERROR = "Not an integer: {}".format
OUT_OF_BOUNDS_ERROR = "{} is out of bounds".format
REPEATED_INTEGERS_ERROR = "Received repeated integers: {}".format
TOO_MANY_QUERIES_ERROR = "Queried too many times"
WRONG_ORDER_ERROR = "Guessed wrong order"
CASE_ERROR = "Case #{} failed: {}".format
EXCEPTION_AFTER_END_ERROR = (
"Exception raised while reading input after all cases finish."
)
ADDITIONAL_INPUT_ERROR = "Additional input after all cases finish: {}".format
QUERIES_USED = "Total Queries Used: {}/{}".format
def ParseInteger(line):
try:
return int(line)
except:
raise Error(NOT_INTEGER_ERROR(line))
def ReadValues(n, line):
if len(line) > 1000:
raise Error(TOO_LONG_LINE_ERROR(len(line)))
parts = line.split()
if len(parts) not in (3, n):
# with open("query.txt", "a+") as f:
# f.write(" ".join(str(i) for i in parts) + "\n")
raise Error(WRONG_NUM_TOKENS_ERROR(n, len(parts)))
v = tuple(ParseInteger(parts[i]) for i in range(len(parts)))
for vi in v:
if not 1 <= vi <= n:
raise Error(OUT_OF_BOUNDS_ERROR(vi))
if len(set(v)) != len(v):
raise Error(REPEATED_INTEGERS_ERROR(v))
return v
def Inv(v):
r = list(v)
for i in range(len(r)):
r[v[i] - 1] = i + 1
return tuple(r)
def Mid(pos, v):
if len(v) != 3:
raise JudgeError("Mid called with {} values (expected 3)".format(len(v)))
p = tuple(pos[vi - 1] for vi in v)
min_p, max_p = min(p), max(p)
for vi in v:
if pos[vi - 1] not in (min_p, max_p):
# with open("query.txt", "a+") as f:
# f.write(" ".join(str(i) for i in pos) + "\n")
# f.write(" ".join(str(i) for i in v) + " -- " + str(vi) + "\n")
return vi
def Output(line):
try:
print(line)
sys.stdout.flush()
except:
# If we let stdout be closed by the end of the program, then an unraisable
# broken pipe exception will happen, and we won't be able to finish
# normally.
try:
sys.stdout.close()
except:
pass
def RunCase(case, max_q):
def Input():
try:
return input()
except:
raise Error(INVALID_LINE_ERROR)
pos = Inv(case)
q = 0
while True:
v = ReadValues(len(case), Input())
# with open("tmp.txt", "w+") as f:
# f.write(" ".join(str(i) for i in case) + "\n" + " ".join(str(i) for i in v))
if len(v) == len(case):
if v != case and v != tuple(reversed(case)):
# with open("tmp.txt", "w+") as f:
# f.write(
# " ".join(str(i) for i in case)
# + "\n"
# + " ".join(str(i) for i in v)
# )
raise Error(WRONG_ORDER_ERROR)
return q
if q >= max_q:
raise Error(TOO_MANY_QUERIES_ERROR)
q += 1
Output(Mid(pos, v))
def RunCases(cases, max_q):
Output("{} {} {}".format(len(cases), len(cases[0]), max_q))
tot_q = 0
for i, case in enumerate(cases, 1):
# print(case)
try:
q = RunCase(case, max_q - tot_q)
Output(CORRECT)
tot_q += q
except Error as err:
Output(WRONG)
raise Error(CASE_ERROR(i, err))
try:
extra_input = input()
except EOFError:
return tot_q
except Exception: # pylint: disable=broad-except
raise Error(EXCEPTION_AFTER_END_ERROR)
raise Error(ADDITIONAL_INPUT_ERROR(extra_input[:100]))
def main():
assert len(sys.argv) == 2, "Bad usage"
index = int(sys.argv[1])
random.seed(1234 + index)
assert index in (0, 1, 2)
try:
q = RunCases(GenCases(Ns[index]), Qs[index])
print(QUERIES_USED(q, Qs[index]), file=sys.stderr)
except Error as err:
print(str(err)[:1000], file=sys.stderr)
sys.exit(1)
except Exception as exception:
Output(WRONG)
print(
("JUDGE_ERROR! Internal judge exception: {}".format(exception))[:1000],
file=sys.stderr,
)
sys.exit(1)
if __name__ == "__main__":
main()
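
# Illustrative sketch of the interaction a contestant's solution must implement
# against this judge (added for clarity; not part of the judge itself, and the
# actual sorting strategy is deliberately omitted; guessed_order is a placeholder):
#
#   T, N, Q = map(int, input().split())        # judge prints "T N Q" first
#   for _ in range(T):
#       # Repeat: print three distinct values in 1..N and flush, then read the
#       # judge's reply, which is whichever of the three sits between the
#       # other two in the hidden order (see Mid() above).
#       print(1, 2, 3, flush=True)
#       median = int(input())
#       # Once the order is known, print all N values on one line and read the
#       # verdict: 1 (CORRECT) or -1 (WRONG).
#       print(*guessed_order, flush=True)
#       if int(input()) == WRONG:
#           sys.exit(1)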
|
the-stack_0_22609 | # -*- coding: utf-8 -*-
"""
pyinfluxql.query
~~~~~~~~~~~~~~~~
PyInfluxQL query generator
"""
import re
import six
import datetime
from copy import copy, deepcopy
from dateutil.tz import tzutc
from .functions import Func
from .utils import format_timedelta, format_boolean
UTC_TZ = tzutc()
class Query(object):
binary_op = {
'eq': '=',
'ne': '!=',
'gt': '>',
'gte': '>=',
'lt': '<',
'lte': '<='
}
_numeric_types = (int, float)
_order_identifiers = {'asc', 'desc'}
def __init__(self, *expressions):
self._select_expressions = list(expressions)
self._measurement = None
self._is_delete = False
self._limit = None
self._where = {}
self._start_time = None
self._end_time = None
self._group_by_fill = False
self._group_by_time = None
self._group_by = []
self._into_series = None
self._order = None
self._order_by = []
def clone(self):
query = Query().from_(self._measurement)
query._select_expressions = deepcopy(self._select_expressions)
query._is_delete = self._is_delete
query._limit = self._limit
query._where = deepcopy(self._where)
query._group_by_time = copy(self._group_by_time)
query._group_by = deepcopy(self._group_by)
return query
def _format_select_expression(self, expr):
formatted = expr
if issubclass(type(expr), Func):
formatted = expr.format()
return formatted
def _format_select_expressions(self, *select_expressions):
"""Format the function stack to form a function clause
If it's empty and there are no functions we just return the column name
"""
return ", ".join([
self._format_select_expression(s) for s in select_expressions
])
def _format_select(self):
return "SELECT %s" % self._format_select_expressions(
*self._select_expressions)
def _format_measurement(self, measurement):
enquote = (
not (measurement[0] == '/' and measurement[-1] == '/')
and (" " in measurement or "-" in measurement))
if enquote:
return '"%s"' % measurement
return measurement
def _format_from(self):
clause = "FROM %s" % self._format_measurement(self._measurement)
return clause
def _format_value(self, value):
if isinstance(value, six.string_types):
if value[0] == '/':
return value
return "'%s'" % value
elif type(value) is bool:
return format_boolean(value)
elif isinstance(value, self._numeric_types):
return "%r" % value
elif isinstance(value, datetime.datetime):
if value.tzinfo:
value = value.astimezone(UTC_TZ)
dt = datetime.datetime.strftime(value, "%Y-%m-%d %H:%M:%S.%f")
return "'%s'" % dt[:-3]
def _format_where_expression(self, identifiers, comparator, value):
return '%s %s %s' % ('.'.join(identifiers),
self.binary_op[comparator],
self._format_value(value))
def _format_where(self):
if not self._where:
return ''
formatted = []
for expression in sorted(self._where.keys()):
if '__' not in expression:
comparator = 'eq'
identifiers = [expression]
else:
identifiers = expression.split('__')
if identifiers[-1] not in self.binary_op:
comparator = 'eq'
else:
comparator = identifiers[-1]
identifiers = identifiers[:-1]
formatted.append(self._format_where_expression(
identifiers, comparator, self._where[expression]))
return "WHERE %s" % (" AND ".join(formatted))
def _format_group_by(self):
if self._group_by or self._group_by_time:
time_format = []
if self._group_by_time:
time_fmt = None
if isinstance(self._group_by_time, datetime.timedelta):
time_fmt = "time(%s)" % format_timedelta(self._group_by_time)
elif isinstance(self._group_by_time, six.string_types):
time_fmt = "time(%s)" % self._group_by_time
if time_fmt is not None:
time_format.append(time_fmt)
clause = "GROUP BY " + ", ".join(time_format + self._group_by)
if self._group_by_fill:
clause += ' fill(0)'
return clause
return ''
def _format_into(self):
if self._into_series:
return 'INTO %s' % self._into_series
return ''
def _format_limit(self):
clause = ''
if self._limit:
clause = "LIMIT %i" % self._limit
return clause
def _format_order(self):
clause = ''
if self._order:
clause = 'ORDER BY %s %s' % (' '.join(self._order_by), self._order)
return clause
def _format_query(self, query):
"""Trims extra spaces and inserts a semicolon at the end
"""
query = re.sub(r' +', ' ', query)
if query[-1] == ' ':
query = query[:len(query) - 1]
return query + ';'
def _format_delete_query(self):
query = "DELETE %s %s" % (self._format_from(), self._format_where())
return self._format_query(query)
def _format_select_query(self):
query = "%s %s %s %s %s %s %s" % (self._format_select(),
self._format_from(),
self._format_where(),
self._format_group_by(),
self._format_limit(),
self._format_into(),
self._format_order())
return self._format_query(query)
def _format(self):
if self._is_delete:
return self._format_delete_query()
return self._format_select_query()
def from_(self, measurement):
self._measurement = measurement
return self
def select(self, *expressions):
"""Could be a one or more column names or expressions composed of
functions from http://influxdb.org/docs/query_language/functions.html
"""
if not expressions:
raise TypeError("Select takes at least one expression")
self._select_expressions.extend(expressions)
return self
def where(self, **clauses):
"""
.where(something=something,
something__ne=something,
something__lt=somethingelse,
something__gt=somethingelseelse)
See "The Where Clause" at http://influxdb.org/docs/query_language/
OR operations are not supported
TODO:
support OR operations by adding in some kind of _Or function
see http://docs.sqlalchemy.org/en/rel_0_9/orm/tutorial.html#common-filter-operators
"""
self._where.update(clauses)
return self
def date_range(self, start=None, end=None):
"""Insert where clauses to filter by date
"""
if not start and not end:
raise ValueError("date_range requires either a start or end")
elif start and end and start > end:
raise ValueError(
"date_range boundaries should have start <= end, got %r > %r" % (
start, end))
if start:
self._where['time__gt'] = start
self._start_time = start
if end:
self._where['time__lt'] = end
self._end_time = end
return self
@property
def start_time(self):
return self._start_time
@property
def end_time(self):
return self._end_time
def group_by(self, *columns, **kwargs):
if 'time' in kwargs and kwargs['time']:
self._group_by_time = kwargs['time']
if 'fill' in kwargs and kwargs['fill']:
self._group_by_fill = True
if columns:
self._group_by.extend(columns)
return self
def group_by_time(self, time, **kwargs):
return self.group_by(time=time, **kwargs)
def into(self, series):
self._into_series = series
return self
def limit(self, n):
self._limit = n
return self
def order(self, field, order):
"""Allows you to order by time ascending or descending.
Time is the only way to order from InfluxDB itself.
"""
if order.lower() not in self._order_identifiers:
raise ValueError("order must either be 'asc' or 'desc'")
self._order_by = [field]
self._order = order.upper()
return self
def __str__(self):
return self._format()
def __unicode__(self):
return six.u(self._format())
class ContinuousQuery(object):
def __init__(self, name, database, query):
self.name = name
self.database = database
self.query = query
def _format(self):
return six.u("""CREATE CONTINUOUS QUERY "{name}" ON {database} BEGIN {query} END""".format(
name=self.name,
database=self.database,
query=str(self.query)).replace(';', ''))
def __str__(self):
return self._format()
def __unicode__(self):
return self._format()
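
# Minimal usage sketch (added for illustration; the measurement, tag and field
# names below are made up, not taken from any real schema). It exercises only
# the chainable API defined above and prints the generated InfluxQL.
if __name__ == '__main__':
    q = (Query('value')
         .from_('cpu_load')
         .where(host='server01', value__gt=0.5)
         .date_range(start=datetime.datetime(2015, 1, 1),
                     end=datetime.datetime(2015, 2, 1))
         .group_by('host', time=datetime.timedelta(hours=1))
         .limit(10))
    print(q)   # e.g. SELECT value FROM cpu_load WHERE ... GROUP BY ... LIMIT 10;
    print(ContinuousQuery('cpu_load_hourly', 'telemetry', q))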
|
the-stack_0_22611 | """Import tasks for the Transient Name Server."""
import csv
import json
import os
import time
import urllib
import warnings
from datetime import datetime, timedelta
from math import ceil
import requests
from astrocats.catalog.photometry import PHOTOMETRY
from astrocats.catalog.spectrum import SPECTRUM
from astrocats.catalog.utils import (is_integer, is_number, jd_to_mjd, pbar,
pretty_num, sortOD)
from decimal import Decimal
from ..kilonova import KILONOVA
def do_tns(catalog):
"""Load TNS metadata."""
session = requests.Session()
task_str = catalog.get_current_task_str()
tns_url = 'https://wis-tns.weizmann.ac.il/'
search_url = tns_url + \
'search?&num_page=1&format=html&sort=desc&order=id&format=csv&page=0'
csvtxt = catalog.load_url(search_url,
os.path.join(catalog.get_current_task_repo(),
'TNS', 'index.csv'))
if not csvtxt:
return
maxid = csvtxt.splitlines()[1].split(',')[0].strip('"')
maxpages = ceil(int(maxid) / 1000.)
for page in pbar(range(maxpages), task_str):
fname = os.path.join(
catalog.get_current_task_repo(), 'TNS',
'page-') + str(page).zfill(2) + '.csv'
if (catalog.current_task.load_archive(catalog.args) and
os.path.isfile(fname) and page < 7):
with open(fname, 'r') as tns_file:
csvtxt = tns_file.read()
else:
with open(fname, 'w') as tns_file:
session = requests.Session()
ses_url = (tns_url + 'search?&num_page=1000&format=html&edit'
'[type]=&edit[objname]=&edit[id]=&sort=asc&order=id'
'&display[redshift]=1'
'&display[hostname]=1&display[host_redshift]=1'
'&display[source_group_name]=1'
'&display[programs_name]=1'
'&display[internal_name]=1'
'&display[isTNS_AT]=1'
'&display[public]=1'
'&display[end_pop_period]=0'
'&display[spectra_count]=1'
'&display[discoverymag]=1&display[discmagfilter]=1'
'&display[discoverydate]=1&display[discoverer]=1'
'&display[sources]=1'
'&display[bibcode]=1&format=csv&page=' + str(page))
try:
response = session.get(ses_url)
csvtxt = response.text
except Exception:
if os.path.isfile(fname):
with open(fname, 'r') as tns_file:
csvtxt = tns_file.read()
else:
continue
else:
tns_file.write(csvtxt)
tsvin = list(csv.reader(csvtxt.splitlines(), delimiter=','))
all_aliases = [y for z in [catalog.entries[x].get_aliases()
for x in catalog.entries] for y in z]
for ri, row in enumerate(pbar(tsvin, task_str, leave=False)):
if ri == 0:
continue
if (not row[4] or (row[4] and 'GW' not in row[4] and
row[4] not in all_aliases)) and (
not row[10] or (row[10] and 'GW' not in row[10] and
row[10] not in all_aliases)):
continue
name = row[1].replace(' ', '')
name = catalog.add_entry(name)
source = catalog.entries[name].add_source(
name='Transient Name Server', url=tns_url)
catalog.entries[name].add_quantity(KILONOVA.ALIAS, name, source)
if row[2] and row[2] != '00:00:00.00':
catalog.entries[name].add_quantity(KILONOVA.RA, row[2],
source)
if row[3] and row[3] != '+00:00:00.00':
catalog.entries[name].add_quantity(KILONOVA.DEC, row[3],
source)
if row[4]:
catalog.entries[name].add_quantity(
KILONOVA.CLAIMED_TYPE, row[4].strip(),
source)
if row[5]:
catalog.entries[name].add_quantity(
KILONOVA.REDSHIFT, row[5], source, kind='spectroscopic')
if row[6]:
catalog.entries[name].add_quantity(KILONOVA.HOST, row[6],
source)
if row[7]:
catalog.entries[name].add_quantity(
[KILONOVA.REDSHIFT, KILONOVA.HOST_REDSHIFT],
row[7],
source,
kind='host')
if row[8]:
catalog.entries[name].add_quantity(KILONOVA.DISCOVERER,
row[8], source)
            # Currently, all events list all possible observers. TNS bug?
# if row[9]:
# observers = row[9].split(',')
# for observer in observers:
# catalog.entries[name].add_quantity('observer',
# observer.strip(),
# source)
if row[10]:
catalog.entries[name].add_quantity(KILONOVA.ALIAS, row[10],
source)
if row[16]:
date = row[16].split()[0].replace('-', '/')
if date != '0000/00/00':
date = date.replace('/00', '')
t = row[16].split()[1]
if t != '00:00:00':
ts = t.split(':')
dt = timedelta(
hours=int(ts[0]),
minutes=int(ts[1]),
seconds=int(ts[2]))
date += pretty_num(
dt.total_seconds() / (24 * 60 * 60),
sig=6).lstrip('0')
catalog.entries[name].add_quantity(KILONOVA.DISCOVER_DATE,
date, source)
if catalog.args.travis and ri >= catalog.TRAVIS_QUERY_LIMIT:
break
if catalog.args.update:
catalog.journal_entries()
catalog.journal_entries()
def do_tns_photo(catalog):
"""Load TNS photometry."""
task_str = catalog.get_current_task_str()
tns_url = 'https://wis-tns.weizmann.ac.il/'
try:
with open('tns.key', 'r') as f:
tnskey = f.read().splitlines()[0]
except Exception:
catalog.log.warning('TNS API key not found, make sure a file named '
                            '`tns.key` containing the key is placed in the '
'astrocats directory.')
tnskey = ''
bandreps = {'Clear': 'C'}
fails = 0
for name in pbar(list(catalog.entries.keys()), task_str):
if name not in catalog.entries:
continue
aliases = catalog.entries[name].get_aliases()
oname = ''
for alias in aliases:
if (alias.startswith('AT') and is_integer(alias[2:6]) and
int(alias[2:6]) >= 2016) and alias[6:].isalpha():
oname = alias
break
if not oname:
continue
reqname = oname[2:]
jsonpath = os.path.join(catalog.get_current_task_repo(), 'TNS',
reqname + '.json')
download_json = True
if os.path.isfile(jsonpath):
with open(jsonpath, 'r') as f:
objdict = json.load(f)
if ('discoverydate' in objdict and
(datetime.now() - datetime.strptime(objdict['discoverydate'],
'%Y-%m-%d %H:%M:%S')
).days > 90):
download_json = False
if download_json:
data = urllib.parse.urlencode({
'api_key': tnskey,
'data': json.dumps({
'objname': reqname,
'photometry': '1'
})
}).encode('ascii')
req = urllib.request.Request(
'https://wis-tns.weizmann.ac.il/api/get/object', data=data)
trys = 0
objdict = None
while trys < 3 and not objdict:
try:
objdict = json.loads(
urllib.request.urlopen(req).read().decode('ascii'))[
'data']['reply']
except KeyboardInterrupt:
raise
except Exception:
catalog.log.warning('API request failed for `{}`.'.format(
name))
time.sleep(5)
trys = trys + 1
if (not objdict or 'objname' not in objdict or
not isinstance(objdict['objname'], str)):
fails = fails + 1
catalog.log.warning('Object `{}` not found!'.format(name))
if fails >= 5:
break
continue
# Cache object here
with open(jsonpath, 'w') as f:
json.dump(sortOD(objdict), f, indent='\t',
separators=(',', ':'), ensure_ascii=False,
sort_keys=True)
if 'photometry' not in objdict:
continue
photoarr = objdict['photometry']
name, source = catalog.new_entry(
oname, srcname='Transient Name Server', url=tns_url)
for photo in photoarr:
if 'mag' not in photo['flux_unit']['name'].lower():
catalog.log.warning('Unknown flux unit `{}`.'.format(photo[
'flux_unit']['name']))
continue
if not photo['jd']:
continue
if not photo['flux'] and not photo['limflux']:
continue
mag = photo['flux'] if photo['flux'] else photo['limflux']
photodict = {
PHOTOMETRY.TIME: str(jd_to_mjd(Decimal(str(photo['jd'])))),
PHOTOMETRY.U_TIME: 'MJD',
PHOTOMETRY.MAGNITUDE: mag,
PHOTOMETRY.SOURCE: source
}
if photo.get('fluxerr', ''):
photodict[PHOTOMETRY.E_MAGNITUDE] = photo['fluxerr']
if not photo['flux']:
photodict[PHOTOMETRY.UPPER_LIMIT] = True
band = photo['filters']['name']
if band:
if band in bandreps:
band = bandreps[band]
photodict[PHOTOMETRY.BAND] = band
if photo.get('observer', ''):
photodict[PHOTOMETRY.OBSERVER] = photo['observer']
if 'source_group' in photo:
survey = photo['source_group']['group_name']
if survey:
photodict[PHOTOMETRY.SURVEY] = survey
if 'telescope' in photo:
telescope = photo['telescope']['name']
if telescope and telescope != 'Other':
photodict[PHOTOMETRY.TELESCOPE] = telescope
if 'instrument' in photo:
instrument = photo['instrument']['name']
if instrument and instrument != 'Other':
photodict[PHOTOMETRY.INSTRUMENT] = instrument
system = ''
if 'Vega' in photo['flux_unit']['name']:
system = 'Vega'
elif 'ab' in photo['flux_unit']['name']:
system = 'AB'
if system:
photodict[PHOTOMETRY.SYSTEM] = system
catalog.entries[name].add_photometry(**photodict)
catalog.journal_entries()
return
def do_tns_spectra(catalog):
"""Load TNS spectra."""
requests.packages.urllib3.disable_warnings()
task_str = catalog.get_current_task_str()
tns_url = 'https://wis-tns.weizmann.ac.il/'
try:
with open('tns.key', 'r') as f:
tnskey = f.read().splitlines()[0]
except Exception:
catalog.log.warning('TNS API key not found, make sure a file named '
                            '`tns.key` containing the key is placed in the '
'astrocats directory.')
tnskey = ''
fails = 0
for name in pbar(list(catalog.entries.keys()), task_str):
if name not in catalog.entries:
continue
aliases = catalog.entries[name].get_aliases()
oname = ''
for alias in aliases:
if (alias.startswith('AT') and is_integer(alias[2:6]) and
int(alias[2:6]) >= 2016) and alias[6:].isalpha():
oname = alias
break
if not oname:
continue
reqname = oname[2:]
jsonpath = os.path.join(catalog.get_current_task_repo(), 'TNS', 'meta',
reqname + '.json')
download_json = True
if os.path.isfile(jsonpath):
with open(jsonpath, 'r') as f:
objdict = json.load(f)
if ('discoverydate' in objdict and
(datetime.now() - datetime.strptime(objdict['discoverydate'],
'%Y-%m-%d %H:%M:%S')
).days > 90):
download_json = False
if download_json:
data = urllib.parse.urlencode({
'api_key': tnskey,
'data': json.dumps({
'objname': reqname,
'spectra': '1'
})
}).encode('ascii')
req = urllib.request.Request(
'https://wis-tns.weizmann.ac.il/api/get/object', data=data)
trys = 0
objdict = None
while trys < 3 and not objdict:
try:
objdict = json.loads(
urllib.request.urlopen(req).read().decode('ascii'))[
'data']['reply']
except KeyboardInterrupt:
raise
except Exception:
catalog.log.warning('API request failed for `{}`.'.format(
name))
time.sleep(5)
trys = trys + 1
if (not objdict or 'objname' not in objdict or
not isinstance(objdict['objname'], str)):
fails = fails + 1
catalog.log.warning('Object `{}` not found!'.format(name))
if fails >= 5:
break
continue
# Cache object here
with open(jsonpath, 'w') as f:
json.dump(sortOD(objdict), f, indent='\t',
separators=(',', ':'), ensure_ascii=False,
sort_keys=True)
if 'spectra' not in objdict:
continue
specarr = objdict['spectra']
name, source = catalog.new_entry(
oname, srcname='Transient Name Server', url=tns_url)
for spectrum in specarr:
spectrumdict = {
                SPECTRUM.SOURCE: source
}
if 'jd' in spectrum:
spectrumdict[SPECTRUM.TIME] = str(
jd_to_mjd(Decimal(str(spectrum['jd']))))
spectrumdict[SPECTRUM.U_TIME] = 'MJD'
if spectrum.get('observer', ''):
spectrumdict[SPECTRUM.OBSERVER] = spectrum['observer']
            if spectrum.get('reducer', ''):
                spectrumdict[SPECTRUM.REDUCER] = spectrum['reducer']
if 'source_group' in spectrum:
survey = spectrum['source_group']['name']
if survey:
spectrumdict[SPECTRUM.SURVEY] = survey
if 'telescope' in spectrum:
telescope = spectrum['telescope']['name']
if telescope and telescope != 'Other':
spectrumdict[SPECTRUM.TELESCOPE] = telescope
if 'instrument' in spectrum:
instrument = spectrum['instrument']['name']
if instrument and instrument != 'Other':
spectrumdict[SPECTRUM.INSTRUMENT] = instrument
if 'asciifile' in spectrum:
fname = urllib.parse.unquote(
spectrum['asciifile'].split('/')[-1])
spectxt = catalog.load_url(
spectrum['asciifile'],
os.path.join(
catalog.get_current_task_repo(), 'TNS', 'spectra',
fname), archived_mode=True)
data = [x.split() for x in spectxt.splitlines()]
skipspec = False
newdata = []
oldval = ''
for row in data:
if row and '#' not in row[0]:
if (len(row) >= 2 and is_number(row[0]) and
is_number(row[1]) and row[1] != oldval):
newdata.append(row)
oldval = row[1]
if skipspec or not newdata:
warnings.warn('Skipped adding spectrum file ' + fname)
continue
data = [list(i) for i in zip(*newdata)]
wavelengths = data[0]
fluxes = data[1]
errors = ''
if len(data) == 3:
                    errors = data[2]
if max([float(x) for x in fluxes]) < 1.0e-5:
fluxunit = 'erg/s/cm^2/Angstrom'
else:
fluxunit = 'Uncalibrated'
spectrumdict.update({
SPECTRUM.U_WAVELENGTHS: 'Angstrom',
SPECTRUM.ERRORS: errors,
SPECTRUM.U_FLUXES: fluxunit,
SPECTRUM.U_ERRORS: fluxunit if errors else '',
SPECTRUM.WAVELENGTHS: wavelengths,
SPECTRUM.FLUXES: fluxes
})
catalog.entries[name].add_spectrum(**spectrumdict)
catalog.journal_entries()
return
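
# The photometry and spectra tasks above read the TNS API key from a file named
# "tns.key" in the astrocats directory (see the warnings above). A minimal
# setup sketch; the key value shown is obviously fake:
#
#   $ echo "0123456789abcdef" > tns.key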
|
the-stack_0_22612 | """Rules for writing tests with ScalaTest"""
load("@bazel_skylib//lib:dicts.bzl", _dicts = "dicts")
load(
"@io_bazel_rules_scala//scala/private:common_attributes.bzl",
"common_attrs",
"implicit_deps",
"launcher_template",
)
load("@io_bazel_rules_scala//scala/private:common.bzl", "sanitize_string_for_usage")
load("@io_bazel_rules_scala//scala/private:common_outputs.bzl", "common_outputs")
load(
"@io_bazel_rules_scala//scala/private:phases/phases.bzl",
"extras_phases",
"phase_collect_jars_scalatest",
"phase_compile_scalatest",
"phase_coverage_runfiles",
"phase_declare_executable",
"phase_default_info",
"phase_java_wrapper_common",
"phase_merge_jars",
"phase_runfiles_scalatest",
"phase_scalac_provider",
"phase_unused_deps_checker",
"phase_write_executable_scalatest",
"phase_write_manifest",
"run_phases",
)
def _scala_test_impl(ctx):
return run_phases(
ctx,
# customizable phases
[
("scalac_provider", phase_scalac_provider),
("write_manifest", phase_write_manifest),
("unused_deps_checker", phase_unused_deps_checker),
("collect_jars", phase_collect_jars_scalatest),
("java_wrapper", phase_java_wrapper_common),
("declare_executable", phase_declare_executable),
# no need to build an ijar for an executable
("compile", phase_compile_scalatest),
("merge_jars", phase_merge_jars),
("runfiles", phase_runfiles_scalatest),
("coverage_runfiles", phase_coverage_runfiles),
("write_executable", phase_write_executable_scalatest),
("default_info", phase_default_info),
],
)
_scala_test_attrs = {
"main_class": attr.string(
default = "io.bazel.rulesscala.scala_test.Runner",
),
"colors": attr.bool(default = True),
"full_stacktraces": attr.bool(default = True),
"jvm_flags": attr.string_list(),
"_scalatest": attr.label(
default = Label(
"//external:io_bazel_rules_scala/dependency/scalatest/scalatest",
),
),
"_scalatest_runner": attr.label(
cfg = "host",
default = Label("//src/java/io/bazel/rulesscala/scala_test:runner"),
),
"_scalatest_reporter": attr.label(
default = Label("//scala/support:test_reporter"),
),
"_jacocorunner": attr.label(
default = Label("@bazel_tools//tools/jdk:JacocoCoverage"),
),
"_lcov_merger": attr.label(
default = Label("@bazel_tools//tools/test/CoverageOutputGenerator/java/com/google/devtools/coverageoutputgenerator:Main"),
),
}
_test_resolve_deps = {
"_scala_toolchain": attr.label_list(
default = [
Label(
"//external:io_bazel_rules_scala/dependency/scala/scala_library",
),
Label(
"//external:io_bazel_rules_scala/dependency/scalatest/scalatest",
),
],
allow_files = False,
),
}
_scala_test_attrs.update(launcher_template)
_scala_test_attrs.update(implicit_deps)
_scala_test_attrs.update(common_attrs)
_scala_test_attrs.update(_test_resolve_deps)
def make_scala_test(*extras):
return rule(
attrs = _dicts.add(
_scala_test_attrs,
extras_phases(extras),
*[extra["attrs"] for extra in extras if "attrs" in extra]
),
executable = True,
fragments = ["java"],
outputs = _dicts.add(
common_outputs,
*[extra["outputs"] for extra in extras if "outputs" in extra]
),
test = True,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_test_impl,
)
scala_test = make_scala_test()
# This auto-generates a test suite based on the passed set of targets
# we will add a root test_suite with the name of the passed name
def scala_test_suite(
name,
srcs = [],
visibility = None,
use_short_names = False,
**kwargs):
ts = []
i = 0
for test_file in srcs:
i = i + 1
n = ("%s_%s" % (name, i)) if use_short_names else ("%s_test_suite_%s" % (name, sanitize_string_for_usage(test_file)))
scala_test(
name = n,
srcs = [test_file],
visibility = visibility,
unused_dependency_checker_mode = "off",
**kwargs
)
ts.append(n)
native.test_suite(name = name, tests = ts, visibility = visibility)
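
# Illustrative BUILD-file usage of the macro above (a sketch only; the target
# name, glob pattern and deps are assumptions, not taken from this repository):
#
#   load("@io_bazel_rules_scala//scala:scala.bzl", "scala_test_suite")
#
#   scala_test_suite(
#       name = "unit_tests",
#       srcs = glob(["src/test/scala/**/*Spec.scala"]),
#       deps = ["//src/main/scala/mylib"],
#       use_short_names = True,
#   )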
|
the-stack_0_22613 | import synapse.lib.module as s_module
contracttypes = (
'nda',
'other',
'grant',
'treaty',
'purchase',
'indemnity',
'partnership',
)
class OuModule(s_module.CoreModule):
def getModelDefs(self):
modl = {
'types': (
('ou:sic', ('str', {'regex': r'^[0-9]{4}$'}), {
'doc': 'The four digit Standard Industrial Classification Code.',
'ex': '0111',
}),
('ou:naics', ('str', {'regex': r'^[1-9][0-9]{4}[0-9]?$'}), {
'doc': 'The five or six digit North American Industry Classification System code.',
'ex': '541715',
}),
('ou:isic', ('str', {'regex': r'^[A-Z]([0-9]{2}[0-9]{0,2})?$'}), {
'doc': 'An International Standard Industrial Classification of All Economic Activities (ISIC) code.',
'ex': 'C1393',
}),
('ou:org', ('guid', {}), {
'doc': 'A GUID for a human organization such as a company or military unit.',
}),
('ou:contract', ('guid', {}), {
'doc': 'An contract between multiple entities.',
}),
('ou:contract:type', ('str', {'enum': contracttypes}), {
'doc': 'A pre-defined set of contract types.',
}),
('ou:industry', ('guid', {}), {
'doc': 'An industry classification type.',
}),
('ou:alias', ('str', {'lower': True, 'regex': r'^[0-9a-z_]+$'}), {
'doc': 'An alias for the org GUID.',
'ex': 'vertexproject',
}),
('ou:hasalias', ('comp', {'fields': (('org', 'ou:org'), ('alias', 'ou:alias'))}), {
'deprecated': True,
'doc': 'The knowledge that an organization has an alias.',
}),
('ou:orgnet4', ('comp', {'fields': (('org', 'ou:org'), ('net', 'inet:net4'))}), {
'doc': "An organization's IPv4 netblock.",
}),
('ou:orgnet6', ('comp', {'fields': (('org', 'ou:org'), ('net', 'inet:net6'))}), {
'doc': "An organization's IPv6 netblock.",
}),
('ou:name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name of an organization. This may be a formal name or informal name of the '
'organization.',
'ex': 'acme corporation',
}),
('ou:member', ('comp', {'fields': (('org', 'ou:org'), ('person', 'ps:person'))}), {
'deprecated': True,
'doc': 'Deprecated. Please use ou:position.',
}),
('ou:position', ('guid', {}), {
'doc': 'A position within an org. May be organized into an org chart.',
}),
('ou:suborg', ('comp', {'fields': (('org', 'ou:org'), ('sub', 'ou:org'))}), {
'doc': 'Any parent/child relationship between two orgs. May represent ownership, organizational structure, etc.',
}),
('ou:org:has', ('comp', {'fields': (('org', 'ou:org'), ('node', 'ndef'))}), {
'doc': 'An org owns, controls, or has exclusive use of an object or resource, '
'potentially during a specific period of time.',
}),
('ou:user', ('comp', {'fields': (('org', 'ou:org'), ('user', 'inet:user'))}), {
'doc': 'A user name within an organization.',
}),
('ou:role', ('str', {'lower': True, 'regex': r'^\w+$'}), {
'ex': 'staff',
'doc': 'A named role when participating in an event.',
}),
('ou:attendee', ('guid', {}), {
'doc': 'A node representing a person attending a meeting, conference, or event.',
}),
('ou:meet', ('guid', {}), {
'doc': 'An informal meeting of people which has no title or sponsor. See also: ou:conference.',
}),
('ou:meet:attendee', ('comp', {'fields': (('meet', 'ou:meet'), ('person', 'ps:person'))}), {
'deprecated': True,
'doc': 'Deprecated. Please use ou:attendee.',
}),
('ou:conference', ('guid', {}), {
'doc': 'A conference with a name and sponsoring org.',
}),
('ou:conference:attendee', ('comp', {'fields': (('conference', 'ou:conference'), ('person', 'ps:person'))}), {
'deprecated': True,
'doc': 'Deprecated. Please use ou:attendee.',
}),
('ou:conference:event', ('guid', {}), {
'doc': 'A conference event with a name and associated conference.',
}),
('ou:conference:event:attendee', ('comp', {'fields': (('conference', 'ou:conference:event'), ('person', 'ps:person'))}), {
'deprecated': True,
'doc': 'Deprecated. Please use ou:attendee.',
}),
('ou:contest', ('guid', {}), {
'doc': 'A competitive event resulting in a ranked set of participants.',
}),
('ou:contest:result', ('comp', {'fields': (('contest', 'ou:contest'), ('participant', 'ps:contact'))}), {
'doc': 'The results from a single contest participant.',
}),
('ou:goal', ('guid', {}), {
'doc': 'An assessed or stated goal which may be abstract or org specific.',
}),
('ou:hasgoal', ('comp', {'fields': (('org', 'ou:org'), ('goal', 'ou:goal'))}), {
'doc': 'An org has an assessed or stated goal.',
}),
('ou:campaign', ('guid', {}), {
'doc': 'Represents an orgs activity in pursuit of a goal.',
}),
('ou:id:type', ('guid', {}), {
'doc': 'A type of id number issued by an org.',
}),
('ou:id:value', ('str', {'strip': True}), {
'doc': 'The value of an org:id:number.',
}),
('ou:id:number', ('comp', {'fields': (('type', 'ou:id:type'), ('value', 'ou:id:value'))}), {
'doc': 'A unique id number issued by a specific organization.',
}),
('ou:id:update', ('guid', {}), {
'doc': 'A status update to an org:id:number.',
}),
('ou:award', ('guid', {}), {
'doc': 'An award issued by an organization.',
}),
),
'forms': (
('ou:award', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the award.',
'ex': 'Bachelors of Science',
}),
('type', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The type of award.',
'ex': 'certification',
}),
('org', ('ou:org', {}), {
'doc': 'The organization which issues the award.',
}),
)),
('ou:id:type', {}, (
('org', ('ou:org', {}), {
'doc': 'The org which issues id numbers of this type.',
}),
('name', ('str', {}), {
'doc': 'The friendly name of the id number type.',
}),
)),
('ou:id:number', {}, (
('type', ('ou:id:type', {}), {
                        'doc': 'The type of the org id.',
}),
('value', ('ou:id:value', {}), {
                        'doc': 'The value of the org id.',
}),
('status', ('str', {'lower': True, 'strip': True}), {
'doc': 'A freeform status such as valid, suspended, expired.',
}),
('issued', ('time', {}), {
'doc': 'The time at which the org issued the ID number.',
}),
('expires', ('time', {}), {
'doc': 'The time at which the ID number expires.',
}),
)),
('ou:id:update', {}, (
('number', ('ou:id:number', {}), {
'doc': 'The id number that was updated.',
}),
('status', ('str', {'strip': True, 'lower': True}), {
'doc': 'The updated status of the id number.',
}),
('time', ('time', {}), {
'doc': 'The date/time that the id number was updated.',
}),
)),
('ou:goal', {}, (
('name', ('str', {}), {
'doc': 'A terse name for the goal.',
}),
('type', ('str', {}), {
'doc': 'A user specified goal type.',
}),
('desc', ('str', {}), {
'doc': 'A description of the goal.',
'disp': {'hint': 'text'},
}),
('prev', ('ou:goal', {}), {
'doc': 'The previous/parent goal in a list or hierarchy.',
}),
)),
('ou:hasgoal', {}, (
('org', ('ou:org', {}), {
'doc': 'The org which has the goal.',
}),
('goal', ('ou:goal', {}), {
'doc': 'The goal which the org has.',
}),
('stated', ('bool', {}), {
'doc': 'Set to true/false if the goal is known to be self stated.',
}),
('window', ('ival', {}), {
'doc': 'Set if a goal has a limited time window.',
}),
)),
('ou:campaign', {}, (
('org', ('ou:org', {}), {
'doc': 'The org carrying out the campaign.',
}),
('goal', ('ou:goal', {}), {
'doc': 'The assessed primary goal of the campaign.',
}),
('actors', ('array', {'type': 'ps:contact', 'split': ',', 'uniq': True, 'sorted': True}), {
'doc': 'Actors who participated in the campaign.',
}),
('goals', ('array', {'type': 'ou:goal', 'split': ',', 'uniq': True, 'sorted': True}), {
'doc': 'Additional assessed goals of the campaign.',
}),
('name', ('str', {}), {
'doc': 'A terse name of the campaign.',
}),
('type', ('str', {}), {
'doc': 'A user specified campaign type.',
}),
('desc', ('str', {}), {
'doc': 'A description of the campaign.',
'disp': {'hint': 'text'},
}),
)),
('ou:org', {}, (
('loc', ('loc', {}), {
'doc': 'Location for an organization.'
}),
('name', ('ou:name', {}), {
'doc': 'The localized name of an organization.',
}),
('type', ('str', {'lower': True, 'strip': True}), {
'ex': 'threat group',
'doc': 'The type of organization.',
}),
('desc', ('str', {}), {
'doc': 'A description of the org.',
}),
('logo', ('file:bytes', {}), {
'doc': 'An image file representing the logo for the organization.',
}),
('names', ('array', {'type': 'ou:name'}), {
'doc': 'A list of alternate names for the organization.',
}),
('alias', ('ou:alias', {}), {
'doc': 'The default alias for an organization.'
}),
('phone', ('tel:phone', {}), {
'doc': 'The primary phone number for the organization.',
}),
('sic', ('ou:sic', {}), {
'deprecated': True,
'doc': 'The Standard Industrial Classification code for the organization.',
}),
('naics', ('ou:naics', {}), {
'deprecated': True,
'doc': 'The North American Industry Classification System code for the organization.',
}),
('industries', ('array', {'type': 'ou:industry', 'uniq': True}), {
'doc': 'The industries associated with the org.',
}),
('us:cage', ('gov:us:cage', {}), {
'doc': 'The Commercial and Government Entity (CAGE) code for the organization.',
}),
('founded', ('time', {}), {
'doc': 'The date on which the org was founded.'}),
('dissolved', ('time', {}), {
'doc': 'The date on which the org was dissolved.'}),
('url', ('inet:url', {}), {
'doc': 'The primary url for the organization.',
}),
('subs', ('array', {'type': 'ou:org'}), {
'doc': 'An array of sub-organizations.'
}),
('orgchart', ('ou:position', {}), {
'doc': 'The root node for an orgchart made up ou:position nodes.',
}),
('hq', ('ps:contact', {}), {
'doc': 'A collection of contact information for the "main office" of an org.',
}),
('locations', ('array', {'type': 'ps:contact'}), {
'doc': 'An array of contacts for facilities operated by the org.',
}),
('dns:mx', ('array', {'type': 'inet:fqdn'}), {
'doc': 'An array of MX domains used by email addresses issued by the org.',
}),
)),
('ou:position', {}, (
('org', ('ou:org', {}), {
'doc': 'The org which has the position.',
}),
('contact', ('ps:contact', {}), {
'doc': 'The contact info for the person who holds the position.',
}),
('title', ('str', {'lower': True, 'onespace': True, 'strip': True}), {
'doc': 'The title of the position.',
}),
('reports', ('array', {'type': 'ou:position'}), {
'doc': 'An array of positions which report to this position.',
}),
)),
('ou:name', {}, ()),
('ou:contract', {}, (
('title', ('str', {}), {
'doc': 'A terse title for the contract.'}),
('types', ('array', {'type': 'ou:contract:type', 'uniq': True, 'split': ','}), {
'doc': 'A list of types that apply to the contract.'}),
('sponsor', ('ps:contact', {}), {
'doc': 'The contract sponsor.'}),
('parties', ('array', {'type': 'ps:contact', 'uniq': True}), {
'doc': 'The non-sponsor entities bound by the contract.'}),
('document', ('file:bytes', {}), {
'doc': 'The best/current contract document.'}),
('signed', ('time', {}), {
'doc': 'The date that the contract signing was complete.'}),
('begins', ('time', {}), {
'doc': 'The date that the contract goes into effect.'}),
('expires', ('time', {}), {
'doc': 'The date that the contract expires.'}),
('completed', ('time', {}), {
'doc': 'The date that the contract was completed.'}),
('terminated', ('time', {}), {
'doc': 'The date that the contract was terminated.'}),
('award:price', ('econ:currency', {}), {
'doc': 'The value of the contract at time of award.'}),
('purchase', ('econ:purchase', {}), {
'doc': 'Purchase details of the contract.'}),
('requirements', ('array', {'type': 'ou:goal', 'uniq': True}), {
'doc': 'The requirements levied upon the parties.'}),
)),
('ou:industry', {}, (
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'A terse name for the industry.'}),
('subs', ('array', {'type': 'ou:industry', 'uniq': True, 'split': ','}), {
'doc': 'An array of sub-industries.'}),
('sic', ('array', {'type': 'ou:sic', 'uniq': True, 'split': ','}), {
'doc': 'An array of SIC codes that map to the industry.'}),
('naics', ('array', {'type': 'ou:naics', 'uniq': True, 'split': ','}), {
'doc': 'An array of NAICS codes that map to the industry.'}),
('isic', ('array', {'type': 'ou:isic', 'uniq': True, 'split': ','}), {
'doc': 'An array of ISIC codes that map to the industry.'}),
)),
('ou:hasalias', {}, (
('org', ('ou:org', {}), {
'ro': True,
'doc': 'The org guid which has the alias.',
}),
('alias', ('ou:alias', {}), {
'ro': True,
'doc': 'Alias for the organization.',
}),
)),
('ou:orgnet4', {}, (
('org', ('ou:org', {}), {
'ro': True,
'doc': 'The org guid which owns the netblock.',
}),
('net', ('inet:net4', {}), {
'ro': True,
'doc': 'Netblock owned by the organization.',
}),
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name that the organization assigns to this netblock.'
}),
)),
('ou:orgnet6', {}, (
('org', ('ou:org', {}), {
'ro': True,
'doc': 'The org guid which owns the netblock.',
}),
('net', ('inet:net6', {}), {
'ro': True,
'doc': 'Netblock owned by the organization.',
}),
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name that the organization assigns to this netblock.'
}),
)),
('ou:member', {}, (
('org', ('ou:org', {}), {
'ro': True,
'doc': 'The GUID of the org the person is a member of.',
}),
('person', ('ps:person', {}), {
'ro': True,
'doc': 'The GUID of the person that is a member of an org.',
}),
('title', ('str', {'lower': True, 'strip': True}), {
'doc': 'The persons normalized title.'
}),
('start', ('time', {'ismin': True}), {
'doc': 'Earliest known association of the person with the org.',
}),
('end', ('time', {'ismax': True}), {
'doc': 'Most recent known association of the person with the org.',
})
)),
('ou:suborg', {}, (
('org', ('ou:org', {}), {
'ro': True,
'doc': 'The org which owns the sub organization.',
}),
('sub', ('ou:org', {}), {
'ro': True,
'doc': 'The sub org which owned by the org.',
}),
('perc', ('int', {'min': 0, 'max': 100}), {
'doc': 'The optional percentage of sub which is owned by org.',
}),
('founded', ('time', {}), {
'doc': 'The date on which the suborg relationship was founded.',
}),
('dissolved', ('time', {}), {
'doc': 'The date on which the suborg relationship was dissolved.',
}),
('current', ('bool', {}), {
                        'doc': 'Bool indicating if the suborg relationship is still current.',
}),
)),
('ou:org:has', {}, (
('org', ('ou:org', {}), {
'ro': True,
'doc': 'The org who owns or controls the object or resource.',
}),
('node', ('ndef', {}), {
'ro': True,
'doc': 'The object or resource that is owned or controlled by the org.',
}),
('node:form', ('str', {}), {
'ro': True,
'doc': 'The form of the object or resource that is owned or controlled by the org.',
}),
)),
('ou:user', {}, (
('org', ('ou:org', {}), {
'ro': True,
'doc': 'The org guid which owns the netblock.',
}),
('user', ('inet:user', {}), {
'ro': True,
'doc': 'The username associated with the organization.',
}),
)),
('ou:attendee', {}, (
('person', ('ps:contact', {}), {
'doc': 'The contact information for the person who attended the event.',
}),
('arrived', ('time', {}), {
'doc': 'The time when the person arrived.',
}),
('departed', ('time', {}), {
'doc': 'The time when the person departed.',
}),
('roles', ('array', {'type': 'ou:role', 'split': ',', 'uniq': True, 'sorted': True}), {
'doc': 'List of the roles the person had at the event.',
}),
('meet', ('ou:meet', {}), {
'doc': 'The meeting that the person attended.',
}),
('conference', ('ou:conference', {}), {
'doc': 'The conference that the person attended.',
}),
('conference:event', ('ou:conference:event', {}), {
'doc': 'The conference event that the person attended.',
}),
('contest', ('ou:contest', {}), {
'doc': 'The contest that the person attended.',
}),
)),
('ou:meet', {}, (
('name', ('str', {'lower': True}), {
'doc': 'A human friendly name for the meeting.',
}),
('start', ('time', {}), {
'doc': 'The date / time the meet starts.',
}),
('end', ('time', {}), {
'doc': 'The date / time the meet ends.',
}),
('place', ('geo:place', ()), {
'doc': 'The geo:place node where the meet was held.',
}),
)),
('ou:meet:attendee', {}, (
('meet', ('ou:meet', {}), {
'ro': True,
'doc': 'The meeting which was attended.',
}),
('person', ('ps:person', {}), {
'ro': True,
'doc': 'The person who attended the meeting.',
}),
('arrived', ('time', {}), {
'doc': 'The time when a person arrived to the meeting.',
}),
('departed', ('time', {}), {
'doc': 'The time when a person departed from the meeting.',
}),
)),
('ou:conference', {}, (
('org', ('ou:org', {}), {
'doc': 'The org which created/managed the conference.',
}),
('organizer', ('ps:contact', {}), {
'doc': 'Contact information for the primary organizer of the conference.',
}),
('sponsors', ('array', {'type': 'ps:contact'}), {
'doc': 'An array of contacts which sponsored the conference.',
}),
('name', ('str', {'lower': True}), {
'doc': 'The full name of the conference.',
                        'ex': 'defcon 2017',
}),
('desc', ('str', {'lower': True}), {
'doc': 'A description of the conference.',
'ex': 'annual cybersecurity conference',
'disp': {'hint': 'text'},
}),
('base', ('str', {'lower': True, 'strip': True}), {
'doc': 'The base name which is shared by all conference instances.',
'ex': 'defcon',
}),
('start', ('time', {}), {
'doc': 'The conference start date / time.',
}),
('end', ('time', {}), {
'doc': 'The conference end date / time.',
}),
('place', ('geo:place', ()), {
'doc': 'The geo:place node where the conference was held.',
}),
('url', ('inet:url', ()), {
'doc': 'The inet:url node for the conference website.',
}),
)),
('ou:conference:attendee', {}, (
('conference', ('ou:conference', {}), {
'ro': True,
'doc': 'The conference which was attended.',
}),
('person', ('ps:person', {}), {
'ro': True,
'doc': 'The person who attended the conference.',
}),
('arrived', ('time', {}), {
'doc': 'The time when a person arrived to the conference.',
}),
('departed', ('time', {}), {
'doc': 'The time when a person departed from the conference.',
}),
('role:staff', ('bool', {}), {
'doc': 'The person worked as staff at the conference.',
}),
('role:speaker', ('bool', {}), {
'doc': 'The person was a speaker or presenter at the conference.',
}),
('roles', ('array', {'type': 'str', 'lower': True}), {
'doc': 'List of the roles the person had at the conference.',
}),
)),
('ou:conference:event', {}, (
('conference', ('ou:conference', {}), {
'ro': True,
'doc': 'The conference to which the event is associated.',
}),
('organizer', ('ps:contact', {}), {
'doc': 'Contact information for the primary organizer of the event.',
}),
('sponsors', ('array', {'type': 'ps:contact'}), {
'doc': 'An array of contacts which sponsored the event.',
}),
('place', ('geo:place', {}), {
'doc': 'The geo:place where the event occurred.',
}),
('name', ('str', {'lower': True}), {
'doc': 'The name of the conference event.',
'ex': 'foobar conference dinner',
}),
('desc', ('str', {'lower': True}), {
'doc': 'A description of the conference event.',
'ex': 'foobar conference networking dinner at ridge hotel',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', ()), {
'doc': 'The inet:url node for the conference event website.',
}),
('contact', ('ps:contact', ()), {
'doc': 'Contact info for the event.',
}),
('start', ('time', {}), {
'doc': 'The event start date / time.',
}),
('end', ('time', {}), {
'doc': 'The event end date / time.',
}),
)),
('ou:conference:event:attendee', {}, (
('event', ('ou:conference:event', {}), {
'ro': True,
'doc': 'The conference event which was attended.',
}),
('person', ('ps:person', {}), {
'ro': True,
'doc': 'The person who attended the conference event.',
}),
('arrived', ('time', {}), {
'doc': 'The time when a person arrived to the conference event.',
}),
('departed', ('time', {}), {
'doc': 'The time when a person departed from the conference event.',
}),
('roles', ('array', {'type': 'str', 'lower': True}), {
'doc': 'List of the roles the person had at the conference event.',
}),
)),
('ou:contest', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the contest.',
'ex': 'defcon ctf 2020',
}),
('type', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The type of contest.',
'ex': 'cyber ctf',
}),
('family', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'A name for a series of recurring contests.',
'ex': 'defcon ctf',
}),
('desc', ('str', {'lower': True}), {
'doc': 'A description of the contest.',
'ex': 'the capture-the-flag event hosted at defcon 2020',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The contest website URL.'
}),
('start', ('time', {}), {
'doc': 'The contest start date / time.',
}),
('end', ('time', {}), {
'doc': 'The contest end date / time.',
}),
('loc', ('loc', {}), {
'doc': 'The geopolitical affiliation of the contest.',
}),
('place', ('geo:place', {}), {
'doc': 'The geo:place where the contest was held.',
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The latlong where the contest was held.',
}),
('conference', ('ou:conference', {}), {
'doc': 'The conference that the contest is associated with.',
}),
('contests', ('array', {'type': 'ou:contest', 'split': ',', 'uniq': True, 'sorted': True}), {
'doc': 'An array of sub-contests that contributed to the rankings.',
}),
('sponsors', ('array', {'type': 'ps:contact', 'split': ',', 'uniq': True, 'sorted': True}), {
'doc': 'Contact information for contest sponsors.',
}),
('organizers', ('array', {'type': 'ps:contact', 'split': ',', 'uniq': True, 'sorted': True}), {
'doc': 'Contact information for contest organizers.',
}),
('participants', ('array', {'type': 'ps:contact', 'split': ',', 'uniq': True, 'sorted': True}), {
'doc': 'Contact information for contest participants.',
}),
)),
('ou:contest:result', {}, (
('contest', ('ou:contest', {}), {
'ro': True,
'doc': 'The contest.',
}),
('participant', ('ps:contact', {}), {
'ro': True,
                        'doc': 'The participant.',
}),
('rank', ('int', {}), {
'doc': 'The rank order of the participant.',
}),
('score', ('int', {}), {
'doc': 'The score of the participant.',
}),
('url', ('inet:url', {}), {
'doc': 'The contest result website URL.',
}),
#TODO duration ('duration'
)),
)
}
name = 'ou'
return ((name, modl),)
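
# Illustrative Storm usage against the model defined above (a sketch; the org
# name and property values are invented):
#
#   [ ou:org=* :name="acme corporation" :founded=2001 ]
#   ou:org:name="acme corporation"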
|
the-stack_0_22614 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Soapindel(MakefilePackage):
"""SOAPindel is focusing on calling indels from the next-generation
paired-end sequencing data."""
homepage = "http://soap.genomics.org.cn/soapindel.html"
version('2.1.7.17', '317ef494173969cdc6a8244dd87d06bd',
url='http://soap.genomics.org.cn/down/SOAPindel_20130918_2.1.7.17.zip')
depends_on('perl', type=('build', 'run'))
build_directory = 'indel_detection.release'
def install(self, spec, prefix):
with working_dir('indel_detection.release'):
install_tree('tools', prefix.tools)
mkdirp(prefix.lib)
install('affine_align.pm', prefix.lib)
install('indel_lib.pm', prefix.lib)
mkdirp(prefix.bin)
install('assemble_align', prefix.bin)
install('cluster_reads', prefix.bin)
def setup_environment(self, spack_env, run_env):
run_env.prepend_path('PERL5LIB', self.prefix.lib)
run_env.prepend_path('PATH', self.prefix.tools)
|
the-stack_0_22616 | from flask import Flask, render_template, redirect, request
from forms import LoginForm
from database import Database
from forms import PlaceForm
from forms import CityForm
from forms import StateForm
from forms import PlacetagForm
from forms import states_detailsForm
from forms import city_detailsForm
from forms import place_detailsForm
from forms import InputForm
from forms import RegisterForm
from client import getRecommendations
from forms import UserLoginForm
app = Flask(__name__,
static_url_path='',
static_folder='static')
app.config['SECRET_KEY'] = 'you-will-never-guess'
#from app import route
@app.route('/hello')
def hello():
return "Hello universe"
#loops
# making list of pokemons
Pokemons =["Pikachu", "Charizard", "Squirtle", "Jigglypuff",
"Bulbasaur", "Gengar", "Charmander", "Mew", "Lugia", "Gyarados"]
@app.route('/sample1')
def sample1():
return render_template("sample1.html", len = len(Pokemons), Pokemons = Pokemons)
# Guarded so that importing this module (e.g. via "flask run") does not start the server mid-file.
if __name__ == '__main__':
    app.run(use_reloader=True, debug=True)
@app.route('/sample')
def sample():
return render_template('sample.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
print('login')
message = ''
form = LoginForm()
if form.validate_on_submit():
# print('Login requested for username {}, password {}'.format(form.username.data, form.password.data))
# return redirect('/index')
db = Database()
id = db.checkLogin(form.username.data, form.password.data)
db.close()
print('id', id)
if id != -1:
return redirect('/states')
else:
message = 'Invalid username or password'
return render_template('login.html', form=form, message=message)
@app.route('/userlogin', methods=['GET', 'POST'])
def userlogin():
print('userlogin')
message = ''
form = UserLoginForm()
if form.validate_on_submit():
# print('Login requested for username {}, password {}'.format(form.username.data, form.password.data))
# return redirect('/index')
db = Database()
result = db.userlogin(form.username.data, form.password.data)
db.close()
print(result)
if len(result)>0:
return redirect('/input')
else:
message = 'Invalid username or password'
return render_template('userlogin.html', form=form, message=message)
@app.route('/register', methods=['GET', 'POST'])
def register():
print('register')
message = ''
form = RegisterForm()
if form.validate_on_submit():
db = Database()
id = db.register(form.username.data, form.fname.data, form.lname.data, form.password.data)
db.close()
print('id', id)
if id != -1:
return redirect('/userlogin')
else:
message = 'Invalid username or password'
return render_template('register.html', form=form, message=message)
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/service')
def service():
return render_template('service.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/states',methods=['GET', 'POST'])
def states():
message = ''
form = StateForm()
db = Database()
if form.validate_on_submit():
# print('Input requested for state{}, ' .format(form.state.data))
# return redirect('/index')
db.addState(form.name.data)
db.commit()
#print('id', id)
#if id != -1:
return redirect('/states')
# else:
# message = 'Invalid state'
States=db.getStates()
db.close()
return render_template('states.html',form=form, message=message,States=States,len=len(States))
# @app.route('/city', methods=['GET', 'POST'])
# def city():
# message = ''
# form = CityForm()
# placeid=request.args.get('placeid')
# form = states_detailsForm()
# db = Database()
# if form.validate_on_submit():
# # print('Input requested for state{}, city{}, ' .format(form.state.data, form.city.data))
# # return redirect('/index')
# id = db.addCity(form.name.data)
# db.commit()
# # print('id', id)
# # if id != -1:
# return redirect('/place?placeid='+placeid)
# # else:
# # message = 'Invalid state or city'
# Cities=db.getCities()
# db.close()
# return render_template('city.html', form=form, message=message,city=Cities,len=len(Cities))
@app.route('/place', methods=['GET', 'POST'])
def place():
message = ''
    form = PlaceForm()
if form.is_submitted():
# return redirect('/index')
db = Database()
cityid=request.args.get('cityid')
print('cityid',cityid)
id = db.addPlace(form.place.data, form.desc.data,form.longitude.data,form.latitude.data,cityid)
db.close()
print('id', id)
if id != -1:
return redirect('/index')
else:
message = 'Invalid place'
return render_template('place.html', form=form, message=message)
@app.route('/placetag', methods=['GET', 'POST'])
def placetag():
message = ''
form = PlacetagForm()
if form.is_submitted():
# return redirect('/index')
db = Database()
cityid=request.args.get('cityid')
print('cityid',cityid)
id = db.addPlacetag(form.placetag.data, form.tag.data)
db.close()
print('id', id)
if id != -1:
return redirect('/index')
else:
message = 'Invalid place'
return render_template('placetag.html', form=form, message=message)
@app.route('/states_details', methods=['GET', 'POST'])
def states_details():
message = ''
stateid=request.args.get('stateid')
form = states_detailsForm()
db = Database()
if form.is_submitted():
if(form.add.data):
db.addCity(form.name.data,stateid,form.desc.data, form.longitude.data, form.latitude.data)
else:
db.executeUpdate("update state set name='"+form.newstate.data+"' where stateid="+stateid)
db.close()
print('id', id)
return redirect('/states_details?stateid='+stateid)
cities=db.getCities(stateid)
state=db.getStateById(stateid)
return render_template('states_details.html', form=form, message=message, len=len(cities),cities=cities,state=state)
@app.route('/citydetails', methods=['GET', 'POST'])
def citydetails():
message = ''
id=request.args.get('id')
form = city_detailsForm()
db=Database()
if form.is_submitted():
# return redirect('/index')
#cityid=request.args.get('cityid')
#print('cityid',cityid)
if(form.add.data):
db.addPlace(form.name.data,form.desc.data, form.longitude.data, form.latitude.data,id)
else:
db.executeUpdate("update city set name='"+form.newcity.data+"' where id="+id)
db.close()
print('id', id)
return redirect('/citydetails?id='+id)
places=db.getplaces(id)
city=db.getCityById(id)
return render_template('citydetails.html', form=form, message=message,len=len(places),places=places, city=city)
@app.route('/placedetails', methods=['GET', 'POST'])
def placedetails():
placeid=request.args.get('placeid')
form=place_detailsForm()
db=Database()
if form.is_submitted():
print('form.add.data',form.add.data,form.placetag.data)
if(form.add.data):
db.executeUpdate('insert into place_tag values ('+placeid+",'"+form.placetag.data+"')")
db.close()
return redirect('/placedetails?placeid='+placeid)
place=db.executeQuery('select * from place where placeid='+str(placeid))[0]
placetags=db.executeQuery('select * from place_tag where placeid='+placeid)
db.close()
return render_template('placedetails.html',place=place,placetags=placetags,taglen=len(placetags))
@app.route('/removeplacetag',methods=['GET','POST'])
def removeplacetag():
placeid=request.args.get('placeid')
tag=request.args.get('tag')
db=Database()
db.executeUpdate('delete from place_tag where placeid='+placeid+" and name='"+tag+"'")
db.close()
return redirect('/placedetails?placeid='+placeid)
@app.route('/distancetest')
def distancetest():
db=Database()
distance=db.getdistance(7,8)
    print('distance ' + str(distance))
    db.close()
    return 'distance: ' + str(distance)
@app.route('/input', methods=['GET','POST'])
def input():
db=Database()
form = InputForm()
if form.is_submitted():
homecity=form.homecity.data
selectedcities=request.form.getlist('selectedcities')
locationtags=request.form.getlist('location_tag')
minbudget=form.minbudget.data
maxbudget=form.maxbudget.data
print(homecity)
print(selectedcities)
print(locationtags)
print(minbudget)
print(maxbudget)
itineraries=db.getItineraries(homecity,selectedcities,locationtags,minbudget,maxbudget)
recommendations=getRecommendations(1,selectedcities)
recom_cities=[]
db=Database()
c=db.getCursor()
for x in recommendations:
c.execute('select * from city where id='+str(x))
city=c.fetchall()
recom_cities.append(city[0])
return render_template('result.html',itineraries=itineraries,recom_cities=recom_cities)
cities=db.executeQuery("select * from city order by name")
tags=db.executeQuery("select distinct(name) as name from place_tag order by name")
return render_template('input.html',cities=cities,len=len(cities),tags=tags,tagslen=len(tags))
# @app.route('/input')
# def input():
# db=Database()
# cities=db.executeQuery("select * from city order by name")
# tags=db.executeQuery("select distinct(name) as name from place_tag order by name")
#     return render_template('input.html',cities=cities,len=len(cities),tags=tags,tagslen=len(tags))
if __name__ == '__main__':
    app.run(use_reloader=True, debug=True)
|
the-stack_0_22620 | r"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
(null)
|
| HTTPConnection()
v
Idle
|
| putrequest()
v
Request-started
|
| ( putheader() )* endheaders()
v
Request-sent
|\_____________________________
| | getresponse() raises
| response = getresponse() | ConnectionError
v v
Unread-response Idle
[Response-headers-read]
|\____________________
| |
| response.read() | putrequest()
v v
Idle Req-started-unread-response
______/|
/ |
response.read() | | ( putheader() )* endheaders()
v v
Request-started Req-sent-unread-response
|
| response.read()
v
Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State __state __response
------------- ------- ----------
Idle _CS_IDLE None
Request-started _CS_REQ_STARTED None
Request-sent _CS_REQ_SENT None
Unread-response _CS_IDLE <response_class>
Req-started-unread-response _CS_REQ_STARTED <response_class>
Req-sent-unread-response _CS_REQ_SENT <response_class>
"""
import email.parser
import email.message
import http
import io
import os
import re
import socket
import collections.abc
from urllib.parse import urlsplit
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "LineTooLong", "RemoteDisconnected", "error",
"responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# hack to maintain backwards compatibility
globals().update(http.HTTPStatus.__members__)
# another hack to maintain backwards compatibility
# Mapping status codes to official W3C names
responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
_MAXHEADERS = 100
# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
#
# VCHAR = %x21-7E
# obs-text = %x80-FF
# header-field = field-name ":" OWS field-value OWS
# field-name = token
# field-value = *( field-content / obs-fold )
# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
# field-vchar = VCHAR / obs-text
#
# obs-fold = CRLF 1*( SP / HTAB )
# ; obsolete line folding
# ; see Section 3.2.4
# token = 1*tchar
#
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
# ; any VCHAR, except delimiters
#
# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
# the patterns for both name and value are more lenient than RFC
# definitions to allow for backwards compatibility
_is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*').fullmatch
_is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
# These characters are not allowed within HTTP URL paths.
# See https://tools.ietf.org/html/rfc3986#section-3.3 and the
# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.
# Prevents CVE-2019-9740. Includes control characters such as \r\n.
# We don't restrict chars above \x7f as putrequest() limits us to ASCII.
_contains_disallowed_url_pchar_re = re.compile('[\x00-\x20\x7f]')
# Arguably only these _should_ allowed:
# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")
# We are more lenient for assumed real world compatibility purposes.
# We always set the Content-Length header for these methods because some
# servers will otherwise respond with a 411
_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
def _encode(data, name='data'):
"""Call data.encode("latin-1") but show a better error message."""
try:
return data.encode("latin-1")
except UnicodeEncodeError as err:
raise UnicodeEncodeError(
err.encoding,
err.object,
err.start,
err.end,
"%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
"if you want to send it encoded in UTF-8." %
(name.title(), data[err.start:err.end], name)) from None
class HTTPMessage(email.message.Message):
# XXX The only usage of this method is in
# http.server.CGIHTTPRequestHandler. Maybe move the code there so
# that it doesn't need to be part of the public API. The API has
# never been defined so this could cause backwards compatibility
# issues.
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp, _class=HTTPMessage):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
headers = []
while True:
line = fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
headers.append(line)
if len(headers) > _MAXHEADERS:
raise HTTPException("got more than %d headers" % _MAXHEADERS)
if line in (b'\r\n', b'\n', b''):
break
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=_class).parsestr(hstring)
class HTTPResponse(io.BufferedIOBase):
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, method=None, url=None):
# If the response includes a content-length header, we need to
# make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. This will
# happen if a self.fp.read() is done (without a size) whether
# self.fp is buffered or not. So, no self.fp.read() by
# clients unless they know what they are doing.
self.fp = sock.makefile("rb")
self.debuglevel = debuglevel
self._method = method
# The HTTPResponse object is returned via urllib. The clients
# of http and urllib expect different attributes for the
# headers. headers is used here and supports urllib. msg is
# provided as a backwards compatibility layer for http
# clients.
self.headers = self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
if len(line) > _MAXLINE:
raise LineTooLong("status line")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise RemoteDisconnected("Remote end closed connection without"
" response")
try:
version, status, reason = line.split(None, 2)
except ValueError:
try:
version, status = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail.
version = ""
if not version.startswith("HTTP/"):
self._close_conn()
raise BadStatusLine(line)
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.headers is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline(_MAXLINE + 1)
if len(skip) > _MAXLINE:
raise LineTooLong("header line")
skip = skip.strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.code = self.status = status
self.reason = reason.strip()
if version in ("HTTP/1.0", "HTTP/0.9"):
# Some servers might still return "0.9", treat it as 1.0 anyway
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
else:
raise UnknownProtocol(version)
self.headers = self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr in self.headers:
print("header:", hdr + ":", self.headers.get(hdr))
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = True
self.chunk_left = None
else:
self.chunked = False
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.headers.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.headers.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = True
def _check_close(self):
conn = self.headers.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.headers.get("connection")
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.headers.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.headers.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def _close_conn(self):
fp = self.fp
self.fp = None
fp.close()
def close(self):
try:
super().close() # set "closed" flag
finally:
if self.fp:
self._close_conn()
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
def flush(self):
super().flush()
if self.fp:
self.fp.flush()
def readable(self):
"""Always returns True"""
return True
# End of "raw stream" methods
def isclosed(self):
"""True if the connection is closed."""
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return b""
if self._method == "HEAD":
self._close_conn()
return b""
if amt is not None:
# Amount is given, implement using readinto
b = bytearray(amt)
n = self.readinto(b)
return memoryview(b)[:n].tobytes()
else:
# Amount is not given (unbounded read) so we must check self.length
# and self.chunked
if self.chunked:
return self._readall_chunked()
if self.length is None:
s = self.fp.read()
else:
try:
s = self._safe_read(self.length)
except IncompleteRead:
self._close_conn()
raise
self.length = 0
self._close_conn() # we read everything
return s
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b and return the number
of bytes read.
"""
if self.fp is None:
return 0
if self._method == "HEAD":
self._close_conn()
return 0
if self.chunked:
return self._readinto_chunked(b)
if self.length is not None:
if len(b) > self.length:
# clip the read to the "end of response"
b = memoryview(b)[0:self.length]
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
n = self.fp.readinto(b)
if not n and b:
# Ideally, we would raise IncompleteRead if the content-length
# wasn't satisfied, but it might break compatibility.
self._close_conn()
elif self.length is not None:
self.length -= n
if not self.length:
self._close_conn()
return n
def _read_next_chunk_size(self):
# Read the next chunk size from the file
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("chunk size")
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
return int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self._close_conn()
raise
def _read_and_discard_trailer(self):
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("trailer line")
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line in (b'\r\n', b'\n', b''):
break
def _get_chunk_left(self):
# return self.chunk_left, reading a new chunk if necessary.
# chunk_left == 0: at the end of the current chunk, need to close it
# chunk_left == None: No current chunk, should read next.
# This function returns non-zero or None if the last chunk has
# been read.
chunk_left = self.chunk_left
if not chunk_left: # Can be 0 or None
if chunk_left is not None:
# We are at the end of chunk, discard chunk end
self._safe_read(2) # toss the CRLF at the end of the chunk
try:
chunk_left = self._read_next_chunk_size()
except ValueError:
raise IncompleteRead(b'')
if chunk_left == 0:
# last chunk: 1*("0") [ chunk-extension ] CRLF
self._read_and_discard_trailer()
# we read everything; close the "file"
self._close_conn()
chunk_left = None
self.chunk_left = chunk_left
return chunk_left
def _readall_chunked(self):
assert self.chunked != _UNKNOWN
value = []
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
break
value.append(self._safe_read(chunk_left))
self.chunk_left = 0
return b''.join(value)
except IncompleteRead:
raise IncompleteRead(b''.join(value))
def _readinto_chunked(self, b):
assert self.chunked != _UNKNOWN
total_bytes = 0
mvb = memoryview(b)
try:
while True:
chunk_left = self._get_chunk_left()
if chunk_left is None:
return total_bytes
if len(mvb) <= chunk_left:
n = self._safe_readinto(mvb)
self.chunk_left = chunk_left - n
return total_bytes + n
temp_mvb = mvb[:chunk_left]
n = self._safe_readinto(temp_mvb)
mvb = mvb[n:]
total_bytes += n
self.chunk_left = 0
except IncompleteRead:
raise IncompleteRead(bytes(b[0:total_bytes]))
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(b''.join(s), amt)
s.append(chunk)
amt -= len(chunk)
return b"".join(s)
def _safe_readinto(self, b):
"""Same as _safe_read, but for reading into a buffer."""
total_bytes = 0
mvb = memoryview(b)
while total_bytes < len(b):
if MAXAMOUNT < len(mvb):
temp_mvb = mvb[0:MAXAMOUNT]
n = self.fp.readinto(temp_mvb)
else:
n = self.fp.readinto(mvb)
if not n:
raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
mvb = mvb[n:]
total_bytes += n
return total_bytes
def read1(self, n=-1):
"""Read with at most one underlying system call. If at least one
byte is buffered, return that instead.
"""
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._read1_chunked(n)
if self.length is not None and (n < 0 or n > self.length):
n = self.length
try:
result = self.fp.read1(n)
except ValueError:
if n >= 0:
raise
# some implementations, like BufferedReader, don't support -1
# Read an arbitrarily selected largeish chunk.
result = self.fp.read1(16*1024)
if not result and n:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def peek(self, n=-1):
# Having this enables IOBase.readline() to read more than one
# byte at a time
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
return self._peek_chunked(n)
return self.fp.peek(n)
def readline(self, limit=-1):
if self.fp is None or self._method == "HEAD":
return b""
if self.chunked:
# Fallback to IOBase readline which uses peek() and read()
return super().readline(limit)
if self.length is not None and (limit < 0 or limit > self.length):
limit = self.length
result = self.fp.readline(limit)
if not result and limit:
self._close_conn()
elif self.length is not None:
self.length -= len(result)
return result
def _read1_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
chunk_left = self._get_chunk_left()
if chunk_left is None or n == 0:
return b''
if not (0 <= n <= chunk_left):
n = chunk_left # if n is negative or larger than chunk_left
read = self.fp.read1(n)
self.chunk_left -= len(read)
if not read:
raise IncompleteRead(b"")
return read
def _peek_chunked(self, n):
# Strictly speaking, _get_chunk_left() may cause more than one read,
# but that is ok, since that is to satisfy the chunked protocol.
try:
chunk_left = self._get_chunk_left()
except IncompleteRead:
return b'' # peek doesn't worry about protocol
if chunk_left is None:
return b'' # eof
# peek is allowed to return more than requested. Just request the
# entire chunk, and truncate what we get.
return self.fp.peek(chunk_left)[:chunk_left]
def fileno(self):
return self.fp.fileno()
def getheader(self, name, default=None):
'''Returns the value of the header matching *name*.
If there are multiple matching headers, the values are
combined into a single string separated by commas and spaces.
If no matching header is found, returns *default* or None if
the *default* is not specified.
If the headers are unknown, raises http.client.ResponseNotReady.
'''
if self.headers is None:
raise ResponseNotReady()
headers = self.headers.get_all(name) or default
if isinstance(headers, str) or not hasattr(headers, '__iter__'):
return headers
else:
return ', '.join(headers)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.headers is None:
raise ResponseNotReady()
return list(self.headers.items())
# We override IOBase.__iter__ so that it doesn't check for closed-ness
def __iter__(self):
return self
# For compatibility with old-style urllib responses.
def info(self):
'''Returns an instance of the class mimetools.Message containing
meta-information associated with the URL.
When the method is HTTP, these headers are those returned by
the server at the head of the retrieved HTML page (including
Content-Length and Content-Type).
When the method is FTP, a Content-Length header will be
present if (as is now usual) the server passed back a file
length in response to the FTP retrieval request. A
Content-Type header will be present if the MIME type can be
guessed.
When the method is local-file, returned headers will include
a Date representing the file's last-modified time, a
Content-Length giving file size, and a Content-Type
containing a guess at the file's type. See also the
description of the mimetools module.
'''
return self.headers
def geturl(self):
'''Return the real URL of the page.
In some cases, the HTTP server redirects a client to another
URL. The urlopen() function handles this transparently, but in
some cases the caller needs to know which URL the client was
redirected to. The geturl() method can be used to get at this
redirected URL.
'''
return self.url
def getcode(self):
'''Return the HTTP status code that was sent with the response,
or None if the URL is not an HTTP URL.
'''
return self.status
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
@staticmethod
def _is_textIO(stream):
"""Test whether a file-like object is a text or a binary stream.
"""
return isinstance(stream, io.TextIOBase)
@staticmethod
def _get_content_length(body, method):
"""Get the content-length based on the body.
If the body is None, we set Content-Length: 0 for methods that expect
a body (RFC 7230, Section 3.3.2). We also set the Content-Length for
any method if the body is a str or bytes-like object and not a file.
"""
if body is None:
# do an explicit check for not None here to distinguish
# between unset and set but empty
if method.upper() in _METHODS_EXPECTING_BODY:
return 0
else:
return None
if hasattr(body, 'read'):
# file-like object.
return None
try:
# does it implement the buffer protocol (bytes, bytearray, array)?
mv = memoryview(body)
return mv.nbytes
except TypeError:
pass
if isinstance(body, str):
return len(body)
return None
def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
self.timeout = timeout
self.source_address = source_address
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
(self.host, self.port) = self._get_hostport(host, port)
# This is stored as an instance variable to allow unit
# tests to replace it with a suitable mockup
self._create_connection = socket.create_connection
def set_tunnel(self, host, port=None, headers=None):
"""Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
        the endpoint passed to `set_tunnel`. This is done by sending an HTTP
CONNECT request to the proxy server when the connection is established.
        This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
if self.sock:
raise RuntimeError("Can't set up tunnel for established connection")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
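    # Illustrative sketch of tunnelling through a proxy (host names are
    # placeholders, not part of the original module):
    #
    #     conn = HTTPConnection("proxy.example.com", 8080)
    #     conn.set_tunnel("www.example.com", headers={"User-Agent": "demo"})
    #     conn.request("GET", "/")
    #     response = conn.getresponse()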
def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
port = self.default_port
else:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
return (host, port)
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
self._tunnel_port)
connect_bytes = connect_str.encode("ascii")
self.send(connect_bytes)
for header, value in self._tunnel_headers.items():
header_str = "%s: %s\r\n" % (header, value)
header_bytes = header_str.encode("latin-1")
self.send(header_bytes)
self.send(b'\r\n')
response = self.response_class(self.sock, method=self._method)
(version, code, message) = response._read_status()
if code != http.HTTPStatus.OK:
self.close()
raise OSError("Tunnel connection failed: %d %s" % (code,
message.strip()))
while True:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
# for sites which EOF without sending a trailer
break
if line in (b'\r\n', b'\n', b''):
break
if self.debuglevel > 0:
print('header:', line.decode())
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = self._create_connection(
(self.host,self.port), self.timeout, self.source_address)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._tunnel_host:
self._tunnel()
def close(self):
"""Close the connection to the HTTP server."""
self.__state = _CS_IDLE
try:
sock = self.sock
if sock:
self.sock = None
sock.close() # close it manually... there may be other refs
finally:
response = self.__response
if response:
self.__response = None
response.close()
def send(self, data):
"""Send `data' to the server.
``data`` can be a string object, a bytes object, an array object, a
file-like object that supports a .read() method, or an iterable object.
"""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
if self.debuglevel > 0:
print("send:", repr(data))
blocksize = 8192
if hasattr(data, "read") :
if self.debuglevel > 0:
print("sendIng a read()able")
encode = self._is_textIO(data)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
while 1:
datablock = data.read(blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
self.sock.sendall(datablock)
return
try:
self.sock.sendall(data)
except TypeError:
            if isinstance(data, collections.abc.Iterable):
for d in data:
self.sock.sendall(d)
else:
raise TypeError("data should be a bytes-like object "
"or an iterable, got %r" % type(data))
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _read_readable(self, readable):
blocksize = 8192
if self.debuglevel > 0:
print("sendIng a read()able")
encode = self._is_textIO(readable)
if encode and self.debuglevel > 0:
print("encoding file using iso-8859-1")
while True:
datablock = readable.read(blocksize)
if not datablock:
break
if encode:
datablock = datablock.encode("iso-8859-1")
yield datablock
def _send_output(self, message_body=None, encode_chunked=False):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
A message_body may be specified, to be appended to the request.
"""
self._buffer.extend((b"", b""))
msg = b"\r\n".join(self._buffer)
del self._buffer[:]
self.send(msg)
if message_body is not None:
# create a consistent interface to message_body
if hasattr(message_body, 'read'):
# Let file-like take precedence over byte-like. This
# is needed to allow the current position of mmap'ed
# files to be taken into account.
chunks = self._read_readable(message_body)
else:
try:
# this is solely to check to see if message_body
# implements the buffer API. it /would/ be easier
# to capture if PyObject_CheckBuffer was exposed
# to Python.
memoryview(message_body)
except TypeError:
try:
chunks = iter(message_body)
except TypeError:
raise TypeError("message_body should be a bytes-like "
"object or an iterable, got %r"
% type(message_body))
else:
# the object implements the buffer interface and
# can be passed directly into socket methods
chunks = (message_body,)
for chunk in chunks:
if not chunk:
if self.debuglevel > 0:
print('Zero length chunk ignored')
continue
if encode_chunked and self._http_vsn == 11:
# chunked encoding
chunk = f'{len(chunk):X}\r\n'.encode('ascii') + chunk \
+ b'\r\n'
self.send(chunk)
if encode_chunked and self._http_vsn == 11:
# end chunked transfer
self.send(b'0\r\n\r\n')
def putrequest(self, method, url, skip_host=False,
skip_accept_encoding=False):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest(self.__state)
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
# Prevent CVE-2019-9740.
match = _contains_disallowed_url_pchar_re.search(url)
if match:
raise InvalidURL(f"URL can't contain control characters. {url!r} "
f"(found at least {match.group()!r})")
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header.  If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
                # proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
if self._tunnel_host:
host = self._tunnel_host
port = self._tunnel_port
else:
host = self.host
port = self.port
try:
host_enc = host.encode("ascii")
except UnicodeEncodeError:
host_enc = host.encode("idna")
                    # As per RFC 2732, an IPv6 address should be wrapped with []
                    # when used as Host header
if host.find(':') >= 0:
host_enc = b'[' + host_enc + b']'
if port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
if not _is_legal_header_name(header):
raise ValueError('Invalid header name %r' % (header,))
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('latin-1')
elif isinstance(one_value, int):
values[i] = str(one_value).encode('ascii')
if _is_illegal_header_value(values[i]):
raise ValueError('Invalid header value %r' % (values[i],))
value = b'\r\n\t'.join(values)
header = header + b': ' + value
self._output(header)
def endheaders(self, message_body=None, *, encode_chunked=False):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional message_body
argument can be used to pass a message body associated with the
request.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body, encode_chunked=encode_chunked)
def request(self, method, url, body=None, headers={}, *,
encode_chunked=False):
"""Send a complete request to the server."""
self._send_request(method, url, body, headers, encode_chunked)
def _send_request(self, method, url, body, headers, encode_chunked):
# Honor explicitly requested Host: and Accept-Encoding: headers.
header_names = frozenset(k.lower() for k in headers)
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
# chunked encoding will happen if HTTP/1.1 is used and either
# the caller passes encode_chunked=True or the following
# conditions hold:
# 1. content-length has not been explicitly set
# 2. the body is a file or iterable, but not a str or bytes-like
# 3. Transfer-Encoding has NOT been explicitly set by the caller
if 'content-length' not in header_names:
# only chunk body if not explicitly set for backwards
# compatibility, assuming the client code is already handling the
# chunking
if 'transfer-encoding' not in header_names:
# if content-length cannot be automatically determined, fall
# back to chunked encoding
encode_chunked = False
content_length = self._get_content_length(body, method)
if content_length is None:
if body is not None:
if self.debuglevel > 0:
print('Unable to determine size of %r' % body)
encode_chunked = True
self.putheader('Transfer-Encoding', 'chunked')
else:
self.putheader('Content-Length', str(content_length))
else:
encode_chunked = False
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
# RFC 2616 Section 3.7.1 says that text default has a
# default charset of iso-8859-1.
body = _encode(body, 'body')
self.endheaders(body, encode_chunked=encode_chunked)
def getresponse(self):
"""Get the response from the server.
If the HTTPConnection is in the correct state, returns an
instance of HTTPResponse or of whatever object is returned by
the response_class variable.
If a request has not been sent or if a previous response has
not be handled, ResponseNotReady is raised. If the HTTP
response indicates that the connection should be closed, then
it will be closed before the response is returned. When the
connection is closed, the underlying socket is closed.
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady(self.__state)
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
method=self._method)
else:
response = self.response_class(self.sock, method=self._method)
try:
try:
response.begin()
except ConnectionError:
self.close()
raise
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
except:
response.close()
raise
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
# XXX Should key_file and cert_file be deprecated in favour of context?
def __init__(self, host, port=None, key_file=None, cert_file=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, *, context=None,
check_hostname=None):
super(HTTPSConnection, self).__init__(host, port, timeout,
source_address)
if (key_file is not None or cert_file is not None or
check_hostname is not None):
import warnings
warnings.warn("key_file, cert_file and check_hostname are "
"deprecated, use a custom context instead.",
DeprecationWarning, 2)
self.key_file = key_file
self.cert_file = cert_file
if context is None:
context = ssl._create_default_https_context()
will_verify = context.verify_mode != ssl.CERT_NONE
if check_hostname is None:
check_hostname = context.check_hostname
if check_hostname and not will_verify:
raise ValueError("check_hostname needs a SSL context with "
"either CERT_OPTIONAL or CERT_REQUIRED")
if key_file or cert_file:
context.load_cert_chain(cert_file, key_file)
self._context = context
self._check_hostname = check_hostname
def connect(self):
"Connect to a host on a given (SSL) port."
super().connect()
if self._tunnel_host:
server_hostname = self._tunnel_host
else:
server_hostname = self.host
self.sock = self._context.wrap_socket(self.sock,
server_hostname=server_hostname)
if not self._context.check_hostname and self._check_hostname:
try:
ssl.match_hostname(self.sock.getpeercert(), server_hostname)
except Exception:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
__all__.append("HTTPSConnection")
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return '%s(%i bytes read%s)' % (self.__class__.__name__,
len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
class RemoteDisconnected(ConnectionResetError, BadStatusLine):
def __init__(self, *pos, **kw):
BadStatusLine.__init__(self, "")
ConnectionResetError.__init__(self, *pos, **kw)
# for backwards compatibility
error = HTTPException
|
the-stack_0_22622 | from scipy.io import wavfile
from numpy import arange
from numpy.fft import fft  # scipy no longer re-exports fft/arange at top level
import matplotlib.pyplot as plt
import numpy as np
def plot(source, img_name, sample_rate=48000):
"""
Command that loads an audio signal and generates a graphical
representation
of amplitude and frequency
"""
grate, gdata = wavfile.read(source)
gy = gdata[:,1]
gtimp = len(gy) / sample_rate
gt = np.linspace(0, gtimp, len(gy))
f, plots = plt.subplots(1, 2, figsize=(10, 3))
plots[0].plot(gt, gy)
plots[0].set_xlabel('Time')
plots[0].set_ylabel('Amplitude')
plots[0].grid(True)
    n = len(gy)  # signal length
k = arange(n)
T = n / sample_rate
frq = k / T # two sides frequency range
frq = frq[list(range(int(n / 2)))] # one side frequency range
Y = fft(gy) / n # fft computing and normalization
Y = Y[list(range(int(n / 2)))]
plots[1].plot(frq, abs(Y), 'r') # plotting the spectrum
plots[1].set_xlabel('Freq (Hz)')
plots[1].set_ylabel('Magnitude / Power')
plots[1].grid(True)
plots[1].set_xscale('log')
plt.tight_layout()
plt.savefig(img_name)
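# Minimal invocation sketch; the file names below are placeholders and not part
# of the original script.
if __name__ == '__main__':
    plot('recording.wav', 'spectrum.png', sample_rate=48000)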
|
the-stack_0_22624 | from rest_framework.serializers import Field
from wagtail.core.models import Page
from typing import Optional
class BreadcrumpSerializer(Field):
    def get_parent_or_none(self, page) -> Optional[Page]:
parent = page.get_parent()
if parent is None:
return None
if parent.url is None:
return None
if parent.url == '/':
return None
return parent
def to_representation(self, page: Page):
parent = page
while parent is not None:
yield {
'id': parent.id,
'url': parent.url,
'slug': parent.slug,
'title': parent.title
}
parent = self.get_parent_or_none(parent)
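# Example of the breadcrumb entries this serializer yields for a page two levels
# below the site root (ids, slugs and titles here are placeholders):
#
#     [{'id': 5, 'url': '/blog/post/', 'slug': 'post', 'title': 'Post'},
#      {'id': 2, 'url': '/blog/', 'slug': 'blog', 'title': 'Blog'}]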
|
the-stack_0_22625 | import numpy as np
import atexit
import bpy
import gpu
from gpu_extras.batch import batch_for_shader
from photogrammetry_importer.blender_utility.logging_utility import log_report
def _compute_transformed_coords(object_anchor_matrix_world, positions):
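    """Transform a list of 3D points by the anchor's 4x4 world matrix, using
    homogeneous coordinates so the full transform is one matrix multiplication."""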
if len(positions) == 0:
return []
pos_arr = np.asarray(positions)
ones_arr = np.ones((pos_arr.shape[0], 1))
pos_hom_arr = np.hstack((pos_arr, ones_arr))
# Transpose the matrix to transform the coordinates
# with a single matrix multiplication
pos_hom_arr_transposed = np.transpose(pos_hom_arr)
transf_pos_hom_transposed_arr = np.matmul(
object_anchor_matrix_world, pos_hom_arr_transposed
)
transf_pos_arr_hom = transf_pos_hom_transposed_arr.T
# Delete the homogeneous entries
transf_pos_arr = np.delete(transf_pos_arr_hom, -1, axis=1)
transf_pos_list = transf_pos_arr.tolist()
return transf_pos_list
class DrawManager:
"""Class that allows to represent point clouds with OpenGL in Blender."""
def __init__(self):
self._anchor_to_draw_callback_handler = {}
self._anchor_to_point_coords = {}
self._anchor_to_point_colors = {}
@classmethod
def get_singleton(cls):
"""Return a singleton of this class."""
if hasattr(bpy.types.Object, "current_draw_manager"):
draw_manger = bpy.types.Object.current_draw_manager
else:
draw_manger = cls()
bpy.types.Object.current_draw_manager = draw_manger
return draw_manger
def register_points_draw_callback(
self, object_anchor, coords, colors, point_size
):
"""Register a callback to draw a point cloud."""
draw_callback_handler = _DrawCallBackHandler()
draw_callback_handler.register_points_draw_callback(
self, object_anchor, coords, colors, point_size
)
self._anchor_to_draw_callback_handler[
object_anchor
] = draw_callback_handler
self._anchor_to_point_coords[object_anchor] = coords
self._anchor_to_point_colors[object_anchor] = colors
def get_coords_and_colors(self, visible_only=False):
"""Return the coordinates and the colors of the maintained points."""
transf_coord_list = []
color_list = []
for object_anchor in self._anchor_to_point_coords:
if visible_only and not object_anchor.visible_get():
continue
coords = self._anchor_to_point_coords[object_anchor]
transf_coord_list = (
transf_coord_list
+ _compute_transformed_coords(
object_anchor.matrix_world, coords
)
)
colors = self._anchor_to_point_colors[object_anchor]
color_list = color_list + colors
return transf_coord_list, color_list
def delete_anchor(self, object_anchor):
"""Delete the anchor used to control the pose of the point cloud."""
del self._anchor_to_point_coords[object_anchor]
del self._anchor_to_point_colors[object_anchor]
# del self._anchor_to_draw_callback_handler[object_anchor]
def get_draw_callback_handler(self, object_anchor):
"""Get the draw callback handler corresponding to the object anchor."""
return self._anchor_to_draw_callback_handler[object_anchor]
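# Illustrative usage sketch (object and point data are assumed inputs, not part
# of the original module):
#
#     draw_manager = DrawManager.get_singleton()
#     draw_manager.register_points_draw_callback(
#         anchor_object, point_coords, point_colors, point_size=5)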
class _DrawCallBackHandler:
"""Class that allows to handle point drawing callbacks."""
def __init__(self):
self._shader = gpu.shader.from_builtin("3D_FLAT_COLOR")
# Handle to the function
self._draw_handler_handle = None
# Handle to the object
self._object_anchor_pose_previous = np.array([])
self._batch_cached = None
self._point_size = 5
# If Blender is closed and self._batch_cached is not properly deleted,
# this causes something like the following:
# "Error: Not freed memory blocks: 2, total unfreed memory 0.001358 MB"
atexit.register(self._clean_batch_cached)
def _clean_batch_cached(self):
"""Clean the cached batch used to draw the points."""
self._batch_cached = None
def set_point_size(self, point_size):
"""Set the point size used to draw the points in the 3D point cloud."""
self._point_size = point_size
def _draw_points_callback(
self, draw_manager, object_anchor, positions, colors
):
"""A callback function to draw a point cloud in Blender's 3D view."""
handle_is_valid = True
try:
# Check if object still exists
object_anchor_name = object_anchor.name
except:
handle_is_valid = False
if handle_is_valid:
if object_anchor_name in bpy.data.objects:
# Use the visibility of the object to enable /
# disable the drawing of the point cloud
if bpy.data.objects[object_anchor_name].visible_get():
# Update the batch depending on the anchor pose (only if
# necessary)
object_anchor_has_changed = not np.array_equal(
self._object_anchor_pose_previous,
object_anchor.matrix_world,
)
if self._batch_cached is None or object_anchor_has_changed:
self._object_anchor_pose_previous = np.copy(
object_anchor.matrix_world
)
transf_pos_list = _compute_transformed_coords(
object_anchor.matrix_world, positions
)
self._batch_cached = batch_for_shader(
self._shader,
"POINTS",
{"pos": transf_pos_list, "color": colors},
)
self._shader.bind()
gpu.state.point_size_set(self._point_size)
previous_depth_mask_value = gpu.state.depth_mask_get()
previous_depth_test_value = gpu.state.depth_test_get()
gpu.state.depth_mask_set(True)
gpu.state.depth_test_set("LESS_EQUAL")
self._batch_cached.draw(self._shader)
gpu.state.depth_mask_set(previous_depth_mask_value)
gpu.state.depth_test_set(previous_depth_test_value)
else:
if self._draw_handler_handle is not None:
log_report(
"INFO",
"Removing draw handler of deleted point cloud handle",
)
bpy.types.SpaceView3D.draw_handler_remove(
self._draw_handler_handle, "WINDOW"
)
self._draw_handler_handle = None
self._batch_cached = None
draw_manager.delete_anchor(object_anchor)
def register_points_draw_callback(
self, draw_manager, object_anchor, positions, colors, point_size
):
"""Register a callback to draw a point cloud."""
self.set_point_size(point_size)
args = (draw_manager, object_anchor, positions, colors)
self._draw_handler_handle = bpy.types.SpaceView3D.draw_handler_add(
self._draw_points_callback, args, "WINDOW", "POST_VIEW"
)
|
the-stack_0_22626 | # Subsample the scan. For each point, find a closest point on the
# wall of the arena.
# From those point pairs, estimate a transform and apply this to the pose.
# Repeat the closest point - estimate transform loop.
# This is an ICP algorithm.
# 05_c_icp_wall_transform
# Claus Brenner, 17 NOV 2012
from lego_robot import *
from slam_b_library import\
filter_step, concatenate_transform
from slam_04_a_project_landmarks import write_cylinders
from slam_04_d_apply_transform_question import\
estimate_transform, apply_transform, correct_pose
from slam_05_a_find_wall_pairs_question import\
get_subsampled_points, get_corresponding_points_on_wall
# ICP: Iterate the steps of transforming the points, selecting point pairs, and
# estimating the transform. Returns the final transformation.
def get_icp_transform(world_points, iterations):
# Iterate assignment and estimation of trafo a few times.
# --->>> Implement your code here.
overall_trafo = (1.0, 1.0, 0.0, 0.0, 0.0)
for j in range(iterations):
trafo_pts = [apply_transform(overall_trafo, p) for p in world_points]
left, right = get_corresponding_points_on_wall(trafo_pts)
trafo = estimate_transform(left, right, fix_scale = True)
if trafo:
overall_trafo = concatenate_transform(trafo, overall_trafo)
# You may use the following strategy:
# Start with the identity transform:
# overall_trafo = (1.0, 1.0, 0.0, 0.0, 0.0)
# Then loop for j in range(iterations):
    #   Transform the world_points using the current overall_trafo
    #     (see 05_b on how to do this)
    #   Call get_corresponding_points_on_wall(...)
# Determine transformation which is needed "on top of" the current
# overall_trafo: trafo = estimate_transform(...)
# Concatenate the found transformation with the current overall_trafo
# to obtain a new, 'combined' transformation. You may use the function
# overall_trafo = concatenate_transform(trafo, overall_trafo)
# to concatenate two similarities.
# Note also that estimate_transform may return None.
#
# Return the final transformation.
return overall_trafo
if __name__ == '__main__':
# The constants we used for the filter_step.
scanner_displacement = 30.0
ticks_to_mm = 0.349
robot_width = 150.0
# The start pose we obtained miraculously.
pose = (1850.0, 1897.0, 3.717551306747922)
# Read the logfile which contains all scans.
logfile = LegoLogfile()
logfile.read("robot4_motors.txt")
logfile.read("robot4_scan.txt")
# Iterate over all positions.
out_file = open("icp_wall_transform.txt", "w")
for i in range(len(logfile.scan_data)):
# Compute the new pose.
pose = filter_step(pose, logfile.motor_ticks[i],
ticks_to_mm, robot_width,
scanner_displacement)
# Subsample points.
subsampled_points = get_subsampled_points(logfile.scan_data[i])
world_points = [LegoLogfile.scanner_to_world(pose, c)
for c in subsampled_points]
# Get the transformation.
        # You may play with the number of iterations here to see
# the effect on the trajectory!
trafo = get_icp_transform(world_points, iterations = 40)
# Correct the initial position using trafo.
pose = correct_pose(pose, trafo)
# Write to file.
# The pose.
print("F %f %f %f" % pose, file=out_file)
# Write the scanner points and corresponding points.
write_cylinders(out_file, "W C",
[apply_transform(trafo, p) for p in world_points])
out_file.close()
|
the-stack_0_22627 | from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from nodeconductor.backup import models, tasks
from nodeconductor.backup.tests import factories
from nodeconductor.iaas.tests.factories import InstanceFactory
from nodeconductor.iaas.models import Instance
class DeleteExpiredBackupsTaskTest(TestCase):
def setUp(self):
self.expired_backup1 = factories.BackupFactory(kept_until=timezone.now() - timedelta(minutes=1))
self.expired_backup2 = factories.BackupFactory(kept_until=timezone.now() - timedelta(minutes=10))
def test_command_starts_backend_deletion(self):
tasks.delete_expired_backups()
self.assertEqual(models.Backup.objects.get(pk=self.expired_backup1.pk).state, models.Backup.States.DELETING)
self.assertEqual(models.Backup.objects.get(pk=self.expired_backup2.pk).state, models.Backup.States.DELETING)
class ExecuteScheduleTaskTest(TestCase):
def setUp(self):
self.not_active_schedule = factories.BackupScheduleFactory(is_active=False)
backupable = InstanceFactory(state=Instance.States.OFFLINE)
self.schedule_for_execution = factories.BackupScheduleFactory(backup_source=backupable)
self.schedule_for_execution.next_trigger_at = timezone.now() - timedelta(minutes=10)
self.schedule_for_execution.save()
self.future_schedule = factories.BackupScheduleFactory()
self.future_schedule.next_trigger_at = timezone.now() + timedelta(minutes=2)
self.future_schedule.save()
def test_command_does_not_create_backups_created_for_not_active_schedules(self):
tasks.execute_schedules()
self.assertEqual(self.not_active_schedule.backups.count(), 0)
def test_command_create_one_backup_created_for_schedule_with_next_trigger_in_past(self):
tasks.execute_schedules()
self.assertEqual(self.schedule_for_execution.backups.count(), 1)
def test_command_does_not_create_backups_created_for_schedule_with_next_trigger_in_future(self):
tasks.execute_schedules()
self.assertEqual(self.future_schedule.backups.count(), 0)
|
the-stack_0_22629 | from edalize_common import make_edalize_test
import pytest
def test_ise(make_edalize_test):
name = 'test_ise_0'
tool_options = {
'family': 'spartan6',
'device': 'xc6slx45',
'package': 'csg324',
'speed': '-2'
}
tf = make_edalize_test('ise',
test_name=name,
param_types=['vlogdefine', 'vlogparam'],
tool_options=tool_options)
tf.backend.configure()
tf.compare_files(['Makefile', 'config.mk',
name + '.tcl', name + '_run.tcl'])
tf.backend.build()
tf.compare_files(['xtclsh.cmd'])
def test_ise_missing_options(make_edalize_test):
tool_options = {
'family': 'spartan6',
'device': 'xc6slx45',
'package': 'csg324',
}
tf = make_edalize_test('ise',
param_types=['vlogdefine', 'vlogparam'],
tool_options=tool_options)
with pytest.raises(RuntimeError) as e:
tf.backend.configure()
assert "Missing required option 'speed'" in str(e.value)
|
the-stack_0_22631 | """
Page interval of dazzler
Created 2019-06-13
"""
from dazzler.components import core
from dazzler.system import Page, Trigger, BindingContext, State
page = Page(
__name__,
core.Container([
core.Interval(timeout=250, identity='interval'),
core.Container(identity='output'),
core.Input(type='checkbox', identity='toggle'),
])
)
@page.bind(Trigger('interval', 'times'), State('toggle', 'checked'))
async def on_time(ctx: BindingContext):
await ctx.set_aspect('output', children=f'Times: {ctx.trigger.value}')
if ctx.trigger.value > 5 and not ctx.states['toggle']['checked']:
await ctx.set_aspect('interval', active=False)
@page.bind(Trigger('toggle', 'checked'))
async def on_check(ctx: BindingContext):
await ctx.set_aspect('interval', active=ctx.trigger.value)
|
the-stack_0_22632 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import mocpy
# -- Project information -----------------------------------------------------
project = 'mocpy'
copyright = '2021, Thomas Boch, Matthieu Baumann, François-Xavier Pineau'
author = 'Thomas Boch, Matthieu Baumann, François-Xavier Pineau'
# The short X.Y version
version = mocpy.__version__
# The full version, including alpha/beta/rc tags
release = mocpy.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'sphinxcontrib.bibtex',
# Extension for plotting image in the doc
'matplotlib.sphinxext.plot_directive',
# To support Numpy docstrings, we use this extension:
# see https://numpydoc.readthedocs.io/en/latest/install.html
'numpydoc',
]
bibtex_bibfiles = ['references.bib']
default_role = 'py:obj'
numpydoc_class_members_toctree = False
autosummary_generate = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap-astropy'
html_show_sphinx = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logotext1': 'MOC', # white, semi-bold
'logotext2': 'py', # orange, light
'logotext3': ':docs', # white, light
'astropy_project_menubar': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# No static so far
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mocpydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mocpy.tex', 'mocpy Documentation',
'Thomas Boch, Matthieu Baumann, François-Xavier Pineau', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mocpy', 'mocpy Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mocpy', 'mocpy Documentation',
author, 'mocpy', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'astropy': ('http://docs.astropy.org/en/latest/', None),
    'matplotlib': ('https://matplotlib.org/', None),
'networkx': ('https://networkx.github.io/documentation/stable/', None),
}
def setup(app):
pass
|
the-stack_0_22633 | import sys
import math
import cv2, os
import numpy as np
from glob import glob
class LineDetection:
def __init__(self, Path):
        self.path = Path # later this should take an image from a video stream instead
# Loads an image
self.src_v1 = cv2.imread(cv2.samples.findFile(self.path))#, cv2.IMREAD_GRAYSCALE)
self.src_v2 = self.src_v1.copy()
self.src_v3 = self.src_v2.copy()
self.edge = cv2.Canny(self.src_v1, 50, 200, None, 3 ) # Canny edge detector
self.color_edge = cv2.cvtColor(self.edge, cv2.COLOR_GRAY2BGR)
        print('shape', self.src_v1.shape) # (480, 640) = (height, width)
        self.center_x = self.src_v1.shape[1] // 2 # setting float coordinates here may cause errors
self.center_y = self.src_v1.shape[0] // 2
def hough_transform(self):
lines_p = cv2.HoughLinesP(self.edge, 1, np.pi / 180, 50, None, 120 , 10)
line_vertical = []
line_horizontal = []
if lines_p is not None:
for i in range(0, len(lines_p)):
                l = lines_p[i][0] # the two endpoint coordinates of this detected line
                abs_v = abs(l[0] - l[2]) # if the x coordinates are similar, draw a vertical line
                abs_h = abs(l[1] - l[3]) # if the y coordinates are similar, draw a horizontal line
if abs_v <= 10:
l[1] = 0
l[3] = 500
line_vertical.append([[l[0], l[1]], [l[2], l[3]]]) # (n, 2, 2)
cv2.line(self.color_edge, (l[0], l[1]), (l[2], l[3]), (0,255,0), 3, cv2.LINE_AA)
elif abs_h <= 10:
l[0] = 0
l[2] = 500
line_horizontal.append([[l[0], l[1]], [l[2], l[3]]])
cv2.line(self.color_edge, (l[0], l[1]), (l[2], l[3]), (255,0,0), 3, cv2.LINE_AA)
return line_vertical, line_horizontal
def calculate_points(self, p1, p2, p3, p4):
x1, y1 = p1
x2, y2 = p2
x3, y3 = p3
x4, y4 = p4
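        # Two-point line intersection: p is the determinant denominator of the
        # standard formula and becomes zero when the two lines are parallel.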
px = (x1*y2 - y1*x2)*(x3-x4) - (x1-x2)*(x3*y4 - y3*x4)
        py = (x1*y2 - y1*x2)*(y3-y4) - (y1-y2)*(x3*y4 - y3*x4)
p = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)
x = px / p
y = py / p
return x, y
def get_intersection_point(self, lines_v, lines_h):
points = []
for line_v in lines_v:
for line_h in lines_h:
x, y = self.calculate_points(line_v[0], line_v[1], line_h[0], line_h[1])
x = round(x)
y = round(y)
points.append([x, y])
return points
def draw_circle(self, is_points):
d1, d2, d3, d4 = 1000, 1000, 1000, 1000
for x, y in is_points:
d = round(math.sqrt((self.center_x-x)**2 + (self.center_y-y)**2))
if x - self.center_x > 0 and self.center_y - y > 0:
if d < d1:
d1 = d
p1 = [x, y]
elif x - self.center_x < 0 and self.center_y - y > 0:
if d < d2:
d2 = d
p2 = [x, y]
elif x - self.center_x < 0 and self.center_y - y < 0:
if d < d3:
d3 = d
p3 = [x, y]
elif x - self.center_x > 0 and self.center_y - y < 0:
if d < d4:
d4 = d
p4 = [x, y]
cv2.circle(self.color_edge, (x, y), 5, (0, 0, 255), -1)
cv2.circle(self.src_v2, (x, y), 5, (0, 0, 255), -1)
try:
            print('Quadrant 1 minimum distance: {}, (x, y) = ({}, {})'.format(d1, p1[0], p1[1]))
cv2.circle(self.color_edge, (p1[0], p1[1]), 5, (0, 255, 255), -1)
cv2.circle(self.src_v2, (p1[0], p1[1]), 5, (0, 255, 255), -1)
            print('Quadrant 2 minimum distance: {}, (x, y) = ({}, {})'.format(d2, p2[0], p2[1]))
cv2.circle(self.color_edge, (p2[0], p2[1]), 5, (0, 255, 255), -1)
cv2.circle(self.src_v2, (p2[0], p2[1]), 5, (0, 255, 255), -1)
            print('Quadrant 3 minimum distance: {}, (x, y) = ({}, {})'.format(d3, p3[0], p3[1]))
cv2.circle(self.color_edge, (p3[0], p3[1]), 5, (0, 255, 255), -1)
cv2.circle(self.src_v2, (p3[0], p3[1]), 5, (0, 255, 255), -1)
            print('Quadrant 4 minimum distance: {}, (x, y) = ({}, {})'.format(d4, p4[0], p4[1]))
cv2.circle(self.color_edge, (p4[0], p4[1]), 5, (0, 255, 255), -1)
cv2.circle(self.src_v2, (p4[0], p4[1]), 5, (0, 255, 255), -1)
cv2.rectangle(self.src_v3, (p2[0], p2[1]), (p4[0], p4[1]), (255,0,255), 3)
except UnboundLocalError:
            print('Some of the quadrant minimum distances do not exist.')
p1 = None
p2 = None
p3 = None
p4 = None
return p2, p4
def crop(self, p2, p4):
x0, y0 = p2[0], p2[1]
x1, y1 = p4[0], p4[1]
self.crop_img = self.src_v1[y0:y1, x0:x1]
def show_image(self):
cv2.imshow("color_edge", self.color_edge)
cv2.imshow("original + intersection_points", self.src_v2)
cv2.imshow("original + ROI BOX", self.src_v3)
cv2.imshow("crop_img", self.crop_img)
cv2.waitKey()
def start_detection(self):
line_vertical, line_horizontal = self.hough_transform()
is_points = self.get_intersection_point(line_vertical, line_horizontal)
p2, p4 = self.draw_circle(is_points)
        if p2 is None:
return 0
self.crop(p2, p4)
        fHead = self.path.split('\\')[-2] # ./train_img, used to separate train from realT
fHead = fHead.split('_')[0] # ./train
fHead = fHead.split('/')[-1] # train
fName = './' + fHead + '_crop/' + self.path.split('\\')[-1]
# cv2.imwrite(fName, self.crop_img)
        print('Saved crop_img({}, {}) to {}.'.format(p4[0]-p2[0], p4[1]-p2[1], fName))
# self.show_image()
path = './train_img/'
img_path = [ y for x in os.walk(path) for y in glob(os.path.join(x[0], '*.jpg'))]
n = 1
for path in img_path:
print('path :', path)
test = LineDetection(path)
if test.start_detection() != 0:
test.show_image()
n += 1
# if n == 20:
# break
print('train_img ------> train_crop')
# test = LineDetection()
# test.start_detection()
# test.show_image()
|
the-stack_0_22635 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Use:
# pr_imagemerge PEARRAY_OUTPUT_DIR
# PEARRAY_OUTPUT_DIR is the directory specified with -o in pearray
import re
import os
import subprocess
import sys
TOOL = "oiiotool"
def run_tool(input1, input2, output):
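    # oiiotool loads both inputs, "--add" sums them pixel-wise and "-o" writes
    # the merged image.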
args = [input1,
input2,
"--add",
"-o",
output
]
try:
subprocess.call([TOOL] + args)
except OSError:
print("Could not execute '%s'" % TOOL)
# Main
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Not enough arguments given. Need a pearray output directory")
exit()
dir = sys.argv[1]
resultsDirs = [f for f in os.listdir(
dir) if os.path.isdir(os.path.join(dir, f))]
if not 'results' in resultsDirs:
print("Not a valid pearray output directory")
exit()
resultsDirs = filter(re.compile('results_[0-9]+').match, resultsDirs)
resultsDirs.append('results') # results_0
resultsDirs = [os.path.abspath(os.path.join(dir, f)) for f in resultsDirs]
if len(resultsDirs) == 1:
exit()
inputFiles = [f for f in os.listdir(
os.path.join(dir, 'results')) if f.endswith('.exr')]
for i, f in enumerate(inputFiles):
print("Computing output %s [%i/%i]" % (f, i+1, len(inputFiles)))
outputFile = os.path.join(dir, 'merged_' + f)
print("Merging %i/%i..." % (2, len(resultsDirs)))
run_tool(os.path.join(resultsDirs[0], f), os.path.join(
resultsDirs[1], f), outputFile)
for j, o in enumerate(resultsDirs[2:]):
print("Merging %i/%i..." % (j+3, len(resultsDirs)))
run_tool(outputFile, os.path.join(o, f), outputFile)
print("Finished")
|
the-stack_0_22637 | import tkinter
import tkinter.messagebox
window = tkinter.Tk()
window.title("Label Test")
# Heading
label_heading = tkinter.Label(window, text = "Dogs Age Calculator", font=('calibre',24,'normal'))
# Get Dogs age
age_var = tkinter.StringVar()
age_lab = tkinter.Label(window, text = "Age :")
age_entry = tkinter.Entry(window, textvariable = age_var)
# button
def calc():
human_age = int(age_var.get()) * 7
tkinter.messagebox.showinfo('title', f"Your dog is {human_age} years old in human years")
calc_button = tkinter.Button(window, text ="Calculate", command = calc)
# layout widgets
label_heading.grid(row = 0, column = 0)
age_lab.grid(row = 3, column = 0)
age_entry.grid(row=3,column=1)
calc_button.grid(row=5, column=1)
# main loop
window.mainloop()
|
the-stack_0_22638 | import os
import sys
import cv2
from matplotlib import pyplot as plt
import sys
import json
import math
import numpy as np
import base64
import tqdm
from PIL import Image, ImageDraw
from argparse import ArgumentParser
import face_alignment
import json
import face_recognition
def draw_landmarks_withcv2(
image: np.ndarray,
points: np.ndarray,
color: tuple = (0, 255, 0),
thickness: int = 3) -> np.ndarray:
result = image.copy()
for p in points:
result = cv2.circle(result, tuple(p), thickness, color, thickness // 2,
-1)
return result
def gen_landmark(image_path):
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cuda')
example_image = cv2.cvtColor(
cv2.imread(image_path), cv2.COLOR_BGR2RGB)
face_landmarks = []
try:
face_landmarks = fa.get_landmarks_from_image(example_image)
        print('number of landmark sets detected: {}'.format(len(face_landmarks)))
        if len(face_landmarks) > 1:
            print('more than one set of landmarks detected: ', face_landmarks)
if len(face_landmarks) == 0:
return []
else:
return np.floor(face_landmarks[0]).astype(np.int32)
    except Exception:
        print('error: failed to extract landmarks from {}'.format(image_path))
        return []
def visual_distance(path):
image = face_recognition.load_image_file(path)
landmark = gen_landmark(path)
threshold = 1.5
name = path.split('/')[-2] + "->" + path.split('/')[-1]
point_left = landmark[0]
point_right = landmark[16]
point_left_eye = landmark[36]
point_right_eye = landmark[45]
distance1 = math.sqrt( ((point_left[0] - point_left_eye[0])**2)+((point_left[1]-point_left_eye[1])**2) )
distance2 = math.sqrt( ((point_right[0]- point_right_eye[0])**2)+((point_right[1]-point_right_eye[1])**2) )
print('distance 1:{:.5f} -> distance 2:{:.5f} division: {:.5f} -> name: {} \n'
.format(distance1,distance2 ,max(distance1,distance2)/ max(min(distance1,distance2),1), name))
# def main():
# # path = '/home/minglee/Documents/aiProjects/dataset/data_wear_mask/false_image_non_mask/bad/40.png'
# # save_path = ''
#
# dataset_path ='/home/minglee/Documents/aiProjects/dataset/data_wear_mask/false_image_non_mask'
# save_dataset_path = '/home/minglee/Documents/aiProjects/dataset/data_wear_mask'
# unmasked_paths=[]
# for root, dirs, files in os.walk(dataset_path, topdown=False):
# for dir in tqdm.tqdm(dirs):
# fs = os.listdir(root + '/' + dir)
# for name in fs:
# new_root = root.replace(dataset_path, save_dataset_path)
# new_root = new_root + '/' + dir
# if not os.path.exists(new_root):
# os.makedirs(new_root)
#
# imgpath = os.path.join(root,dir, name)
# save_imgpath = os.path.join(new_root,name)
# visual_distance(imgpath)
#
# # image = draw_landmarks_withcv2(image, landmark, (0, 255, 0),thickness=2)
#
# # plt.imshow(image)
# # plt.show()
# if __name__ == '__main__':
# main()
|
the-stack_0_22641 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.modeling import registry
from torch import nn
@registry.ROI_BOX_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
def __init__(self, config, pretrained=None):
super(FastRCNNPredictor, self).__init__()
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
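        # e.g. for ResNet-50: res2_out_channels=256, stage_index=4 -> 256 * 2**3 = 2048 inputs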
num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7)
self.cls_score = nn.Linear(num_inputs, num_classes)
num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4)
nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
nn.init.constant_(self.bbox_pred.bias, 0)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
cls_logit = self.cls_score(x)
bbox_pred = self.bbox_pred(x)
return cls_logit, bbox_pred
@registry.ROI_BOX_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
def __init__(self, cfg):
super(FPNPredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
self.cls_score = nn.Linear(representation_size, num_classes)
num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, x):
scores = self.cls_score(x)
bbox_deltas = self.bbox_pred(x)
return scores, bbox_deltas
def make_roi_box_predictor(cfg):
func = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
return func(cfg)
|
the-stack_0_22643 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : Oct 13, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
from qgis.PyQt.QtCore import Qt, QModelIndex
from qgis.PyQt.QtWidgets import QItemDelegate, QComboBox, QDialog, QPushButton, QDialogButtonBox, QMessageBox, QApplication
from qgis.PyQt.QtCore import QItemSelectionModel, pyqtSignal
from qgis.utils import OverrideCursor
from .db_plugins.data_model import TableFieldsModel
from .db_plugins.plugin import DbError, ConnectionError
from .dlg_db_error import DlgDbError
from .ui.ui_DlgCreateTable import Ui_DbManagerDlgCreateTable as Ui_Dialog
class TableFieldsDelegate(QItemDelegate):
""" delegate with some special item editors """
columnNameChanged = pyqtSignal()
def __init__(self, field_types, parent=None):
QItemDelegate.__init__(self, parent)
self.fieldTypes = field_types
def createEditor(self, parent, option, index):
# special combobox for field type
if index.column() == 1:
cbo = QComboBox(parent)
cbo.setEditable(True)
cbo.setFrame(False)
for item in self.fieldTypes:
cbo.addItem(item)
return cbo
return QItemDelegate.createEditor(self, parent, option, index)
def setEditorData(self, editor, index):
""" load data from model to editor """
m = index.model()
if index.column() == 1:
txt = m.data(index, Qt.DisplayRole)
editor.setEditText(txt)
else:
# use default
QItemDelegate.setEditorData(self, editor, index)
def setModelData(self, editor, model, index):
""" save data from editor back to model """
if index.column() == 1:
model.setData(index, editor.currentText())
else:
# use default
QItemDelegate.setModelData(self, editor, model, index)
if index.column() == 0:
self.columnNameChanged.emit()
class DlgCreateTable(QDialog, Ui_Dialog):
GEOM_TYPES = ["POINT", "LINESTRING", "POLYGON", "MULTIPOINT", "MULTILINESTRING", "MULTIPOLYGON",
"GEOMETRYCOLLECTION"]
def __init__(self, item, parent=None):
QDialog.__init__(self, parent)
self.item = item
self.setupUi(self)
self.db = self.item.database()
self.schemas = self.db.schemas()
self.hasSchemas = self.schemas is not None
self.fieldTypes = self.db.connector.fieldTypes()
m = TableFieldsModel(self, True) # it's editable
self.fields.setModel(m)
self.fields.setColumnHidden(3, True) # hide Default column
d = TableFieldsDelegate(self.fieldTypes, self)
self.fields.setItemDelegate(d)
self.fields.setColumnWidth(0, 140)
self.fields.setColumnWidth(1, 140)
self.fields.setColumnWidth(2, 50)
b = QPushButton(self.tr("&Create"))
self.buttonBox.addButton(b, QDialogButtonBox.ActionRole)
self.btnAddField.clicked.connect(self.addField)
self.btnDeleteField.clicked.connect(self.deleteField)
self.btnFieldUp.clicked.connect(self.fieldUp)
self.btnFieldDown.clicked.connect(self.fieldDown)
b.clicked.connect(self.createTable)
self.chkGeomColumn.clicked.connect(self.updateUi)
self.fields.selectionModel().selectionChanged.connect(self.updateUiFields)
d.columnNameChanged.connect(self.updatePkeyCombo)
self.populateSchemas()
self.updateUi()
self.updateUiFields()
def populateSchemas(self):
self.cboSchema.clear()
if not self.hasSchemas:
self.hideSchemas()
return
index = -1
for schema in self.schemas:
self.cboSchema.addItem(schema.name)
if hasattr(self.item, 'schema') and schema.name == self.item.schema().name:
index = self.cboSchema.count() - 1
self.cboSchema.setCurrentIndex(index)
def hideSchemas(self):
self.cboSchema.setEnabled(False)
def updateUi(self):
useGeom = self.chkGeomColumn.isChecked()
self.cboGeomType.setEnabled(useGeom)
self.editGeomColumn.setEnabled(useGeom)
self.spinGeomDim.setEnabled(useGeom)
self.editGeomSrid.setEnabled(useGeom)
self.chkSpatialIndex.setEnabled(useGeom)
def updateUiFields(self):
fld = self.selectedField()
if fld is not None:
up_enabled = (fld != 0)
down_enabled = (fld != self.fields.model().rowCount() - 1)
del_enabled = True
else:
up_enabled, down_enabled, del_enabled = False, False, False
self.btnFieldUp.setEnabled(up_enabled)
self.btnFieldDown.setEnabled(down_enabled)
self.btnDeleteField.setEnabled(del_enabled)
def updatePkeyCombo(self, selRow=None):
""" called when list of columns changes. if 'sel' is None, it keeps current index """
if selRow is None:
selRow = self.cboPrimaryKey.currentIndex()
self.cboPrimaryKey.clear()
m = self.fields.model()
for row in range(m.rowCount()):
name = m.data(m.index(row, 0))
self.cboPrimaryKey.addItem(name)
self.cboPrimaryKey.setCurrentIndex(selRow)
def addField(self):
""" add new field to the end of field table """
m = self.fields.model()
newRow = m.rowCount()
m.insertRows(newRow, 1)
indexName = m.index(newRow, 0, QModelIndex())
indexType = m.index(newRow, 1, QModelIndex())
indexNull = m.index(newRow, 2, QModelIndex())
m.setData(indexName, "new_field")
colType = self.fieldTypes[0]
if newRow == 0:
# adding the first row, use auto-incrementing column type if any
if "serial" in self.fieldTypes: # PostgreSQL
colType = "serial"
m.setData(indexType, colType)
m.setData(indexNull, None, Qt.DisplayRole)
m.setData(indexNull, Qt.Unchecked, Qt.CheckStateRole)
# selects the new row
sel = self.fields.selectionModel()
sel.select(indexName, QItemSelectionModel.Rows | QItemSelectionModel.ClearAndSelect)
# starts editing
self.fields.edit(indexName)
self.updatePkeyCombo(0 if newRow == 0 else None)
def selectedField(self):
sel = self.fields.selectedIndexes()
if len(sel) < 1:
return None
return sel[0].row()
def deleteField(self):
""" delete selected field """
row = self.selectedField()
if row is None:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("No field selected."))
else:
self.fields.model().removeRows(row, 1)
self.updatePkeyCombo()
def fieldUp(self):
""" move selected field up """
row = self.selectedField()
if row is None:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("No field selected."))
return
if row == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("Field is already at the top."))
return
# take row and reinsert it
rowdata = self.fields.model().takeRow(row)
self.fields.model().insertRow(row - 1, rowdata)
# set selection again
index = self.fields.model().index(row - 1, 0, QModelIndex())
self.fields.selectionModel().select(index, QItemSelectionModel.Rows | QItemSelectionModel.ClearAndSelect)
self.updatePkeyCombo()
def fieldDown(self):
""" move selected field down """
row = self.selectedField()
if row is None:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("No field selected."))
return
if row == self.fields.model().rowCount() - 1:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("Field is already at the bottom."))
return
# take row and reinsert it
rowdata = self.fields.model().takeRow(row)
self.fields.model().insertRow(row + 1, rowdata)
# set selection again
index = self.fields.model().index(row + 1, 0, QModelIndex())
self.fields.selectionModel().select(index, QItemSelectionModel.Rows | QItemSelectionModel.ClearAndSelect)
self.updatePkeyCombo()
def createTable(self):
""" create table with chosen fields, optionally add a geometry column """
if not self.hasSchemas:
schema = None
else:
schema = str(self.cboSchema.currentText())
if len(schema) == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("A valid schema must be selected first."))
return
table = str(self.editName.text())
if len(table) == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("A valid table name is required."))
return
m = self.fields.model()
if m.rowCount() == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("At least one field is required."))
return
useGeomColumn = self.chkGeomColumn.isChecked()
if useGeomColumn:
geomColumn = str(self.editGeomColumn.text())
if len(geomColumn) == 0:
QMessageBox.information(self, self.tr("DB Manager"), self.tr("A name is required for the geometry column."))
return
geomType = self.GEOM_TYPES[self.cboGeomType.currentIndex()]
geomDim = self.spinGeomDim.value()
try:
geomSrid = int(self.editGeomSrid.text())
except ValueError:
geomSrid = 0
useSpatialIndex = self.chkSpatialIndex.isChecked()
flds = m.getFields()
pk_index = self.cboPrimaryKey.currentIndex()
if pk_index >= 0:
flds[pk_index].primaryKey = True
# commit to DB
with OverrideCursor(Qt.WaitCursor):
try:
if not useGeomColumn:
self.db.createTable(table, flds, schema)
else:
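                    # geometry spec consumed by createVectorTable:
                    # (column name, geometry type, srid, dimensions, spatial-index flag)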
geom = geomColumn, geomType, geomSrid, geomDim, useSpatialIndex
self.db.createVectorTable(table, flds, geom, schema)
except (ConnectionError, DbError) as e:
DlgDbError.showError(e, self)
return
QMessageBox.information(self, self.tr("DB Manager"), self.tr("Table created successfully."))
|
the-stack_0_22645 | """
This file offers the methods to automatically retrieve the graph Hyphomonas sp. T16B2.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HyphomonasSpT16b2(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Hyphomonas sp. T16B2 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Hyphomonas sp. T16B2 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="HyphomonasSpT16b2",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
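# Usage sketch: building the graph downloads (or reuses cached) STRING files and
# returns an ensmallen Graph instance, e.g.
#   graph = HyphomonasSpT16b2(directed=False, verbose=1)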
|
the-stack_0_22646 | import itertools
from argparse import ArgumentParser
import pysam
def count(file, separator):
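    # Tally reads per group (the part of the read name before the separator)
    # and the overall total.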
entries = {}
with pysam.FastxFile(file) as f:
for record in f:
seq = record.name.split(separator)[0]
if seq not in entries:
entries[seq] = 1
else: entries[seq] += 1
total = 0
for entry in entries:
total += entries[entry]
return entries, total
def print_cmp(msg, file1_num, file2_num):
print(msg)
print('\tfile1: ', file1_num)
print('\tfile2: ', file2_num)
print('\tdifference: ', file1_num - file2_num)
print('\tdifference (%): ', ((file1_num - file2_num) / file1_num) * 100)
def compare(file1, file2, separator):
entries1, total1 = count(file1, separator)
entries2, total2 = count(file2, separator)
for entry1, entry2 in itertools.zip_longest(entries1, entries2):
if entry1 is not None:
val = entries2[entry1] if entry1 in entries2 else 0
print_cmp(entry1, entries1[entry1], val)
if entry2 is not None and entry2 not in entries1:
print_cmp(entry2, 0, entries2[entry2])
print('---------------')
print_cmp('total reads: ', total1, total2)
def main():
parser = ArgumentParser(description='Compare two FastQ files')
parser.add_argument('file1', type=str,
help='str: path to file 1')
parser.add_argument('file2', type=str,
help='str: path to file 2')
parser.add_argument('separator', type=str,
help='char: separator used when splitting sequence names')
args = parser.parse_args()
file1 = args.file1
file2 = args.file2
separator = args.separator
compare(file1, file2, separator)
if __name__ == '__main__':
main()
|
the-stack_0_22647 | import pytest
from rt_utils.utils import COLOR_PALETTE
from tests.test_rtstruct_builder import get_empty_mask
VALID_COLORS = [
("fff", [255, 255, 255]),
("#fff", [255, 255, 255]),
(None, COLOR_PALETTE[0]),
(COLOR_PALETTE[1], COLOR_PALETTE[1]),
("#696969", [105, 105, 105]),
("a81414", [168, 20, 20]),
("#000", [0, 0, 0]),
]
INVALID_COLORS = [
("GGG", ValueError),
("red", ValueError),
("22", ValueError),
("[]", ValueError),
([], ValueError),
([24, 34], ValueError),
([24, 34, 454], ValueError),
([0, 344, 0], ValueError),
("a8141", ValueError),
("a814111", ValueError),
(KeyboardInterrupt, ValueError),
]
@pytest.mark.parametrize("color", VALID_COLORS)
def test_mask_colors(new_rtstruct, color):
color_in, color_out = color
name = "Test ROI"
mask = get_empty_mask(new_rtstruct)
mask[50:100, 50:100, 0] = 1
new_rtstruct.add_roi(mask, color=color_in, name=name)
assert new_rtstruct.ds.ROIContourSequence[0].ROIDisplayColor == color_out
@pytest.mark.parametrize("color", INVALID_COLORS)
def test_mask_colors_fail(new_rtstruct, color):
color_in, err = color
name = "Test ROI"
mask = get_empty_mask(new_rtstruct)
mask[50:100, 50:100, 0] = 1
with pytest.raises(err):
new_rtstruct.add_roi(mask, color=color_in, name=name)
|